From 98d0a21a6068fe12b0fb1651ecc741988c1cfb1c Mon Sep 17 00:00:00 2001
From: github-actions
Date: Tue, 19 Sep 2023 00:30:06 +0000
Subject: [PATCH] Deployed 3726f4a08 to dev with MkDocs 1.5.3 and mike 1.1.2

---
 [diffstat: 527 of the 541 files -- dev/404.html, dev/index.html,
 dev/search/search_index.json, and the generated index.html of every other
 page under dev/api/, dev/examples/, dev/faq/, dev/introduction/,
 dev/recipes/, and dev/releases/ -- changed by one line each ( | 2 +- ).
 The 14 entries that differ:]

 dev/api/base/DriftAndWarningDetector/index.html     |    4 +-
 dev/api/base/DriftDetector/index.html               |    4 +-
 dev/api/drift/ADWIN/index.html                      |    4 +-
 dev/api/drift/DummyDriftDetector/index.html         |    4 +-
 dev/api/drift/KSWIN/index.html                      |    4 +-
 dev/api/drift/PageHinkley/index.html                |    4 +-
 dev/api/forest/ARFClassifier/index.html             |   43 +-
 dev/api/forest/ARFRegressor/index.html              |   28 +-
 dev/benchmarks/Binary classification/index.html     |    6 +-
 dev/benchmarks/Multiclass classification/index.html |    6 +-
 dev/benchmarks/Regression/index.html                |    6 +-
 dev/releases/unreleased/index.html                  |   20 +-
 dev/sitemap.xml                                     | 1072 ++++++++--------
 dev/sitemap.xml.gz                                  |  Bin 4343 -> 4343 bytes
 .../__pycache__/__main__.cpython-311.pyc            |  Bin 29635 -> 29635 bytes
 541 files changed, 1169 insertions(+), 1088 deletions(-)
 [diff body: every " | 2 +-" file above carries a single one-line hunk near
 the top of the page (@@ -22,7 +22,7 @@ on the API pages, @@ -16,7 +16,7 @@ on
 dev/404.html) -- the kind of templated line, likely a per-build stamp, that a
 mike deploy rewrites in every page. The " | 4 +-" drift-detector pages each
 carry a second one-line hunk further down, next to their "Methods" heading
 (e.g. @@ -3661,7 +3661,7 @@ in DriftAndWarningDetector/index.html,
 @@ -3546,7 +3546,7 @@ in ADWIN/index.html). The old/new line contents
 themselves were not preserved in this capture.]
--- a/dev/api/evaluate/MultiClassClassificationTrack/index.html +++ b/dev/api/evaluate/MultiClassClassificationTrack/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/evaluate/RegressionTrack/index.html b/dev/api/evaluate/RegressionTrack/index.html index 4832b4281e..5757e5d52b 100644 --- a/dev/api/evaluate/RegressionTrack/index.html +++ b/dev/api/evaluate/RegressionTrack/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/evaluate/Track/index.html b/dev/api/evaluate/Track/index.html index 32e7b5aa54..b0c05868a3 100644 --- a/dev/api/evaluate/Track/index.html +++ b/dev/api/evaluate/Track/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/evaluate/iter-progressive-val-score/index.html b/dev/api/evaluate/iter-progressive-val-score/index.html index 85623e0da0..0f6c02f670 100644 --- a/dev/api/evaluate/iter-progressive-val-score/index.html +++ b/dev/api/evaluate/iter-progressive-val-score/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/evaluate/progressive-val-score/index.html b/dev/api/evaluate/progressive-val-score/index.html index 7a3e94f7ba..e10cc9d0cc 100644 --- a/dev/api/evaluate/progressive-val-score/index.html +++ b/dev/api/evaluate/progressive-val-score/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FFMClassifier/index.html b/dev/api/facto/FFMClassifier/index.html index c10e920219..36206aebee 100644 --- a/dev/api/facto/FFMClassifier/index.html +++ b/dev/api/facto/FFMClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FFMRegressor/index.html b/dev/api/facto/FFMRegressor/index.html index 2b10f2ee9e..a39dd05d2e 100644 --- a/dev/api/facto/FFMRegressor/index.html +++ b/dev/api/facto/FFMRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FMClassifier/index.html b/dev/api/facto/FMClassifier/index.html index 9c382678e0..857e4b5051 100644 --- a/dev/api/facto/FMClassifier/index.html +++ b/dev/api/facto/FMClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FMRegressor/index.html b/dev/api/facto/FMRegressor/index.html index 858df9b2af..a448b82acc 100644 --- a/dev/api/facto/FMRegressor/index.html +++ b/dev/api/facto/FMRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FwFMClassifier/index.html b/dev/api/facto/FwFMClassifier/index.html index 3c53fd12a5..9733e49814 100644 --- a/dev/api/facto/FwFMClassifier/index.html +++ b/dev/api/facto/FwFMClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/FwFMRegressor/index.html b/dev/api/facto/FwFMRegressor/index.html index 230b4a97d8..ee5b6daebd 100644 --- a/dev/api/facto/FwFMRegressor/index.html +++ b/dev/api/facto/FwFMRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/HOFMClassifier/index.html b/dev/api/facto/HOFMClassifier/index.html index 6949276d1e..1bb2238e5b 100644 --- a/dev/api/facto/HOFMClassifier/index.html +++ b/dev/api/facto/HOFMClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/facto/HOFMRegressor/index.html b/dev/api/facto/HOFMRegressor/index.html index 7729319067..3c4f5a0bd3 100644 --- a/dev/api/facto/HOFMRegressor/index.html +++ b/dev/api/facto/HOFMRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/Agg/index.html b/dev/api/feature-extraction/Agg/index.html index 79d3288727..a5272b7b3d 100644 --- a/dev/api/feature-extraction/Agg/index.html +++ b/dev/api/feature-extraction/Agg/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/BagOfWords/index.html b/dev/api/feature-extraction/BagOfWords/index.html index 3931e4d752..86d7ff09d3 100644 --- 
a/dev/api/feature-extraction/BagOfWords/index.html +++ b/dev/api/feature-extraction/BagOfWords/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/PolynomialExtender/index.html b/dev/api/feature-extraction/PolynomialExtender/index.html index 5a4538434f..456777e93b 100644 --- a/dev/api/feature-extraction/PolynomialExtender/index.html +++ b/dev/api/feature-extraction/PolynomialExtender/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/RBFSampler/index.html b/dev/api/feature-extraction/RBFSampler/index.html index deeddf9d7b..f1dabab26e 100644 --- a/dev/api/feature-extraction/RBFSampler/index.html +++ b/dev/api/feature-extraction/RBFSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/TFIDF/index.html b/dev/api/feature-extraction/TFIDF/index.html index 8bce597bcb..406a49a311 100644 --- a/dev/api/feature-extraction/TFIDF/index.html +++ b/dev/api/feature-extraction/TFIDF/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-extraction/TargetAgg/index.html b/dev/api/feature-extraction/TargetAgg/index.html index 2d803a330b..3087385b3c 100644 --- a/dev/api/feature-extraction/TargetAgg/index.html +++ b/dev/api/feature-extraction/TargetAgg/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-selection/PoissonInclusion/index.html b/dev/api/feature-selection/PoissonInclusion/index.html index afb6c4f474..3eec7e07c6 100644 --- a/dev/api/feature-selection/PoissonInclusion/index.html +++ b/dev/api/feature-selection/PoissonInclusion/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-selection/SelectKBest/index.html b/dev/api/feature-selection/SelectKBest/index.html index 8017a51b1a..1d7b3d2df7 100644 --- a/dev/api/feature-selection/SelectKBest/index.html +++ b/dev/api/feature-selection/SelectKBest/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/feature-selection/VarianceThreshold/index.html b/dev/api/feature-selection/VarianceThreshold/index.html index db355ed2dc..5c7f2c61a2 100644 --- a/dev/api/feature-selection/VarianceThreshold/index.html +++ b/dev/api/feature-selection/VarianceThreshold/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/forest/AMFClassifier/index.html b/dev/api/forest/AMFClassifier/index.html index 7929c78c41..4f80d43d62 100644 --- a/dev/api/forest/AMFClassifier/index.html +++ b/dev/api/forest/AMFClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/forest/AMFRegressor/index.html b/dev/api/forest/AMFRegressor/index.html index d548015e5b..ee3177e4f7 100644 --- a/dev/api/forest/AMFRegressor/index.html +++ b/dev/api/forest/AMFRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/forest/ARFClassifier/index.html b/dev/api/forest/ARFClassifier/index.html index ac78ce9e19..68b841f2ac 100644 --- a/dev/api/forest/ARFClassifier/index.html +++ b/dev/api/forest/ARFClassifier/index.html @@ -22,7 +22,7 @@ - + @@ -3559,13 +3559,52 @@

Examples

evaluate.progressive_val_score(dataset, model, metric) -
Accuracy: 71.07%
+
Accuracy: 71.17%
+

+

The total numbers of warnings and drifts detected, respectively +

model.n_warnings_detected(), model.n_drifts_detected()
+
+
(2, 1)
+

+

The number of warnings detected by tree number 2 +

model.n_warnings_detected(2)
+
+
1
+

+

And the corresponding number of actual concept drifts detected +

model.n_drifts_detected(2)
+
+
1
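For context, here is a minimal end-to-end sketch of a run like the one above. The dataset and hyperparameters are assumptions (the example's setup is not visible in this hunk), so the reported accuracy and counts may differ:

from river import evaluate, forest, metrics
from river.datasets import synth

# Assumed setup: a synthetic stream containing one induced concept drift.
dataset = synth.ConceptDriftStream(seed=42, position=500, width=40).take(1000)
model = forest.ARFClassifier(seed=8, leaf_prediction="mc")
metric = metrics.Accuracy()

evaluate.progressive_val_score(dataset, model, metric)

# Ensemble-wide counts, then the counts for a single tree.
model.n_warnings_detected(), model.n_drifts_detected()
model.n_warnings_detected(2), model.n_drifts_detected(2)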
 

Methods

learn_one
+n_drifts_detected +

Get the total number of concept drifts detected or, optionally, the number detected by an individual tree.

+

If drift detection is disabled, this method returns None.

+

Parameters

+
    +
  • tree_id — 'int | None' — defaults to None
+

Returns

+

int | None: The number of concept drifts detected.

+
+

+
+n_warnings_detected +

Get the total number of concept drift warnings detected or, optionally, the number detected by an individual tree.

+

If warning detection is disabled, this method returns None.

+

Parameters

+
    +
  • tree_id — 'int | None' — defaults to None
+

Returns

+

int | None: The number of concept drift warnings detected.

+
+

+
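A compact sketch of how these counters are typically queried after training, continuing from the example above. The zero-based interpretation of tree_id is an assumption:

# Total across the ensemble (tree_id omitted).
total_drifts = model.n_drifts_detected()

# Count for a single member; tree_id is assumed to be a zero-based index.
drifts_in_tree_2 = model.n_drifts_detected(2)

# When detection is disabled on the model, both calls return None,
# so guard before doing arithmetic with the result.
if total_drifts is not None:
    print(f"{total_drifts} drift(s) across the forest")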
predict_one

Predict the label of a set of features x.

Parameters

diff --git a/dev/api/forest/ARFRegressor/index.html b/dev/api/forest/ARFRegressor/index.html index f5fee45649..8727e96c88 100644 --- a/dev/api/forest/ARFRegressor/index.html +++ b/dev/api/forest/ARFRegressor/index.html @@ -22,7 +22,7 @@ - + @@ -3564,13 +3564,37 @@

Examples

evaluate.progressive_val_score(dataset, model, metric)

-
MAE: 0.800378
+
MAE: 0.788619
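As above, a minimal sketch of a comparable regression run; the dataset and seed are assumptions (not visible in this hunk), so the exact MAE will vary:

from river import datasets, evaluate, forest, metrics

# Assumed setup; any regression stream works here.
dataset = datasets.TrumpApproval()
model = forest.ARFRegressor(seed=42)
metric = metrics.MAE()

evaluate.progressive_val_score(dataset, model, metric)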
 

Methods

learn_one
+n_drifts_detected +

Get the total number of concept drifts detected or, optionally, the number detected by an individual tree.

+

If drift detection is disabled, this method returns None.

+

Parameters

+
    +
  • tree_id — 'int | None' — defaults to None
+

Returns

+

int | None: The number of concept drifts detected.

+
+

+
+n_warnings_detected +

Get the total number of concept drift warnings detected or, optionally, the number detected by an individual tree.

+

If warning detection is disabled, this method returns None.

+

Parameters

+
    +
  • tree_id — 'int | None' — defaults to None
+

Returns

+

int | None: The number of concept drift warnings detected.

+
+

+
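The same counters shown for the classifier apply here. A short sketch of collecting per-tree counts, assuming tree_id is a zero-based index and that the n_models constructor parameter is readable back from the fitted model:

# Per-member counts; both assumptions are noted above.
drifts_per_tree = [model.n_drifts_detected(i) for i in range(model.n_models)]
warnings_per_tree = [model.n_warnings_detected(i) for i in range(model.n_models)]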
predict_one

Predict the output of a set of features x.

Parameters

diff --git a/dev/api/forest/OXTRegressor/index.html b/dev/api/forest/OXTRegressor/index.html index 9a98626cb2..2bdf9bd398 100644 --- a/dev/api/forest/OXTRegressor/index.html +++ b/dev/api/forest/OXTRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/ChebyshevOverSampler/index.html b/dev/api/imblearn/ChebyshevOverSampler/index.html index 52b1fd3554..f599f4fd84 100644 --- a/dev/api/imblearn/ChebyshevOverSampler/index.html +++ b/dev/api/imblearn/ChebyshevOverSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/ChebyshevUnderSampler/index.html b/dev/api/imblearn/ChebyshevUnderSampler/index.html index 2c5341ad1a..3ee98a7fe4 100644 --- a/dev/api/imblearn/ChebyshevUnderSampler/index.html +++ b/dev/api/imblearn/ChebyshevUnderSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/HardSamplingClassifier/index.html b/dev/api/imblearn/HardSamplingClassifier/index.html index 120ea81be3..942573ce17 100644 --- a/dev/api/imblearn/HardSamplingClassifier/index.html +++ b/dev/api/imblearn/HardSamplingClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/HardSamplingRegressor/index.html b/dev/api/imblearn/HardSamplingRegressor/index.html index 707a631fd4..04662f27e0 100644 --- a/dev/api/imblearn/HardSamplingRegressor/index.html +++ b/dev/api/imblearn/HardSamplingRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/RandomOverSampler/index.html b/dev/api/imblearn/RandomOverSampler/index.html index 4096f405ba..60d45ba902 100644 --- a/dev/api/imblearn/RandomOverSampler/index.html +++ b/dev/api/imblearn/RandomOverSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/RandomSampler/index.html b/dev/api/imblearn/RandomSampler/index.html index 2c475f59d6..e276cad650 100644 --- a/dev/api/imblearn/RandomSampler/index.html +++ b/dev/api/imblearn/RandomSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/imblearn/RandomUnderSampler/index.html b/dev/api/imblearn/RandomUnderSampler/index.html index 5871e699cd..4717c92aee 100644 --- a/dev/api/imblearn/RandomUnderSampler/index.html +++ b/dev/api/imblearn/RandomUnderSampler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/ALMAClassifier/index.html b/dev/api/linear-model/ALMAClassifier/index.html index bbb2a20292..56be941e39 100644 --- a/dev/api/linear-model/ALMAClassifier/index.html +++ b/dev/api/linear-model/ALMAClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/BayesianLinearRegression/index.html b/dev/api/linear-model/BayesianLinearRegression/index.html index 006d879db7..5d2df21fd6 100644 --- a/dev/api/linear-model/BayesianLinearRegression/index.html +++ b/dev/api/linear-model/BayesianLinearRegression/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/LinearRegression/index.html b/dev/api/linear-model/LinearRegression/index.html index 496b702471..5ba4a080b5 100644 --- a/dev/api/linear-model/LinearRegression/index.html +++ b/dev/api/linear-model/LinearRegression/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/LogisticRegression/index.html b/dev/api/linear-model/LogisticRegression/index.html index 916947531f..7e573e0ea0 100644 --- a/dev/api/linear-model/LogisticRegression/index.html +++ b/dev/api/linear-model/LogisticRegression/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/PAClassifier/index.html b/dev/api/linear-model/PAClassifier/index.html index df570f07dc..26c83ac434 100644 --- a/dev/api/linear-model/PAClassifier/index.html +++ 
b/dev/api/linear-model/PAClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/PARegressor/index.html b/dev/api/linear-model/PARegressor/index.html index e497ed8dad..ce5b587a4f 100644 --- a/dev/api/linear-model/PARegressor/index.html +++ b/dev/api/linear-model/PARegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/Perceptron/index.html b/dev/api/linear-model/Perceptron/index.html index b83b91ff2b..9443b9b653 100644 --- a/dev/api/linear-model/Perceptron/index.html +++ b/dev/api/linear-model/Perceptron/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/SoftmaxRegression/index.html b/dev/api/linear-model/SoftmaxRegression/index.html index ea6059f31a..d057e6479a 100644 --- a/dev/api/linear-model/SoftmaxRegression/index.html +++ b/dev/api/linear-model/SoftmaxRegression/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/linear-model/base/GLM/index.html b/dev/api/linear-model/base/GLM/index.html index 0247a7b4f8..aa78917296 100644 --- a/dev/api/linear-model/base/GLM/index.html +++ b/dev/api/linear-model/base/GLM/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Accuracy/index.html b/dev/api/metrics/Accuracy/index.html index e5ccce9f92..759beb812e 100644 --- a/dev/api/metrics/Accuracy/index.html +++ b/dev/api/metrics/Accuracy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/AdjustedMutualInfo/index.html b/dev/api/metrics/AdjustedMutualInfo/index.html index 24677989dd..dd3beb10b7 100644 --- a/dev/api/metrics/AdjustedMutualInfo/index.html +++ b/dev/api/metrics/AdjustedMutualInfo/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/AdjustedRand/index.html b/dev/api/metrics/AdjustedRand/index.html index 3be7beef97..22ad365dfc 100644 --- a/dev/api/metrics/AdjustedRand/index.html +++ b/dev/api/metrics/AdjustedRand/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/BalancedAccuracy/index.html b/dev/api/metrics/BalancedAccuracy/index.html index 76359f9635..7175017e8e 100644 --- a/dev/api/metrics/BalancedAccuracy/index.html +++ b/dev/api/metrics/BalancedAccuracy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/ClassificationReport/index.html b/dev/api/metrics/ClassificationReport/index.html index 86442cc203..60215aae0a 100644 --- a/dev/api/metrics/ClassificationReport/index.html +++ b/dev/api/metrics/ClassificationReport/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/CohenKappa/index.html b/dev/api/metrics/CohenKappa/index.html index 2251720c75..df68686fac 100644 --- a/dev/api/metrics/CohenKappa/index.html +++ b/dev/api/metrics/CohenKappa/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Completeness/index.html b/dev/api/metrics/Completeness/index.html index c25b629f8e..5108786f6b 100644 --- a/dev/api/metrics/Completeness/index.html +++ b/dev/api/metrics/Completeness/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/ConfusionMatrix/index.html b/dev/api/metrics/ConfusionMatrix/index.html index 3d88e9e43b..10a8b27aac 100644 --- a/dev/api/metrics/ConfusionMatrix/index.html +++ b/dev/api/metrics/ConfusionMatrix/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/CrossEntropy/index.html b/dev/api/metrics/CrossEntropy/index.html index 6eafc4165a..e138e393a2 100644 --- a/dev/api/metrics/CrossEntropy/index.html +++ b/dev/api/metrics/CrossEntropy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/F1/index.html b/dev/api/metrics/F1/index.html index deaa338f25..67d5bc0480 100644 --- a/dev/api/metrics/F1/index.html +++ 
b/dev/api/metrics/F1/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/FBeta/index.html b/dev/api/metrics/FBeta/index.html index bb004a701f..eedf1f6e5b 100644 --- a/dev/api/metrics/FBeta/index.html +++ b/dev/api/metrics/FBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/FowlkesMallows/index.html b/dev/api/metrics/FowlkesMallows/index.html index fb2645ad43..7619ab0b50 100644 --- a/dev/api/metrics/FowlkesMallows/index.html +++ b/dev/api/metrics/FowlkesMallows/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/GeometricMean/index.html b/dev/api/metrics/GeometricMean/index.html index dd2354c718..161c07e3fe 100644 --- a/dev/api/metrics/GeometricMean/index.html +++ b/dev/api/metrics/GeometricMean/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Homogeneity/index.html b/dev/api/metrics/Homogeneity/index.html index f2bca122b8..49a91ad5ea 100644 --- a/dev/api/metrics/Homogeneity/index.html +++ b/dev/api/metrics/Homogeneity/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Jaccard/index.html b/dev/api/metrics/Jaccard/index.html index 9cef76e520..e5a9dfdad4 100644 --- a/dev/api/metrics/Jaccard/index.html +++ b/dev/api/metrics/Jaccard/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/LogLoss/index.html b/dev/api/metrics/LogLoss/index.html index 29e20271ea..8294b7a395 100644 --- a/dev/api/metrics/LogLoss/index.html +++ b/dev/api/metrics/LogLoss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MAE/index.html b/dev/api/metrics/MAE/index.html index 9db70a1f09..7a2539bbbf 100644 --- a/dev/api/metrics/MAE/index.html +++ b/dev/api/metrics/MAE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MAPE/index.html b/dev/api/metrics/MAPE/index.html index 5b672e3fd0..b2928eadca 100644 --- a/dev/api/metrics/MAPE/index.html +++ b/dev/api/metrics/MAPE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MCC/index.html b/dev/api/metrics/MCC/index.html index a4ef9d0b95..9672572a41 100644 --- a/dev/api/metrics/MCC/index.html +++ b/dev/api/metrics/MCC/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MSE/index.html b/dev/api/metrics/MSE/index.html index edcf1a4b8d..e796d9b2af 100644 --- a/dev/api/metrics/MSE/index.html +++ b/dev/api/metrics/MSE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MacroF1/index.html b/dev/api/metrics/MacroF1/index.html index a0c2df73e9..22dcbac44d 100644 --- a/dev/api/metrics/MacroF1/index.html +++ b/dev/api/metrics/MacroF1/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MacroFBeta/index.html b/dev/api/metrics/MacroFBeta/index.html index 7596d870a3..d4230b8a14 100644 --- a/dev/api/metrics/MacroFBeta/index.html +++ b/dev/api/metrics/MacroFBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MacroJaccard/index.html b/dev/api/metrics/MacroJaccard/index.html index 97c49ba56a..402a44e953 100644 --- a/dev/api/metrics/MacroJaccard/index.html +++ b/dev/api/metrics/MacroJaccard/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MacroPrecision/index.html b/dev/api/metrics/MacroPrecision/index.html index e56b875815..899bd44e55 100644 --- a/dev/api/metrics/MacroPrecision/index.html +++ b/dev/api/metrics/MacroPrecision/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MacroRecall/index.html b/dev/api/metrics/MacroRecall/index.html index 7ba8804dd0..a341c239cd 100644 --- a/dev/api/metrics/MacroRecall/index.html +++ b/dev/api/metrics/MacroRecall/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MicroF1/index.html 
b/dev/api/metrics/MicroF1/index.html index dd9145bcc4..fe99792923 100644 --- a/dev/api/metrics/MicroF1/index.html +++ b/dev/api/metrics/MicroF1/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MicroFBeta/index.html b/dev/api/metrics/MicroFBeta/index.html index d2e3ec33e2..7932311452 100644 --- a/dev/api/metrics/MicroFBeta/index.html +++ b/dev/api/metrics/MicroFBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MicroJaccard/index.html b/dev/api/metrics/MicroJaccard/index.html index 5fb7a02cb4..8d4c92f234 100644 --- a/dev/api/metrics/MicroJaccard/index.html +++ b/dev/api/metrics/MicroJaccard/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MicroPrecision/index.html b/dev/api/metrics/MicroPrecision/index.html index 80ac5c9cfe..a8f1bf19ec 100644 --- a/dev/api/metrics/MicroPrecision/index.html +++ b/dev/api/metrics/MicroPrecision/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MicroRecall/index.html b/dev/api/metrics/MicroRecall/index.html index 8eb5161ee8..33a842437f 100644 --- a/dev/api/metrics/MicroRecall/index.html +++ b/dev/api/metrics/MicroRecall/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MultiFBeta/index.html b/dev/api/metrics/MultiFBeta/index.html index 04285b854d..d2d11bc88d 100644 --- a/dev/api/metrics/MultiFBeta/index.html +++ b/dev/api/metrics/MultiFBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/MutualInfo/index.html b/dev/api/metrics/MutualInfo/index.html index ec93580d4c..1c2863a020 100644 --- a/dev/api/metrics/MutualInfo/index.html +++ b/dev/api/metrics/MutualInfo/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/NormalizedMutualInfo/index.html b/dev/api/metrics/NormalizedMutualInfo/index.html index bca17e1bfe..8e181b0cec 100644 --- a/dev/api/metrics/NormalizedMutualInfo/index.html +++ b/dev/api/metrics/NormalizedMutualInfo/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Precision/index.html b/dev/api/metrics/Precision/index.html index 0161feec7f..1df839f0c4 100644 --- a/dev/api/metrics/Precision/index.html +++ b/dev/api/metrics/Precision/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/R2/index.html b/dev/api/metrics/R2/index.html index 643a79740d..52f074258a 100644 --- a/dev/api/metrics/R2/index.html +++ b/dev/api/metrics/R2/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/RMSE/index.html b/dev/api/metrics/RMSE/index.html index 9886467cb2..e3b1ba34fc 100644 --- a/dev/api/metrics/RMSE/index.html +++ b/dev/api/metrics/RMSE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/RMSLE/index.html b/dev/api/metrics/RMSLE/index.html index 6bd21425c1..de9504d4de 100644 --- a/dev/api/metrics/RMSLE/index.html +++ b/dev/api/metrics/RMSLE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/ROCAUC/index.html b/dev/api/metrics/ROCAUC/index.html index b4d101be8a..de9275c00b 100644 --- a/dev/api/metrics/ROCAUC/index.html +++ b/dev/api/metrics/ROCAUC/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Rand/index.html b/dev/api/metrics/Rand/index.html index f0bbbd6902..ec1978c3a9 100644 --- a/dev/api/metrics/Rand/index.html +++ b/dev/api/metrics/Rand/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Recall/index.html b/dev/api/metrics/Recall/index.html index 0f8dd01604..d6f10c2688 100644 --- a/dev/api/metrics/Recall/index.html +++ b/dev/api/metrics/Recall/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/RollingROCAUC/index.html b/dev/api/metrics/RollingROCAUC/index.html index 7092336523..3120cb0706 
100644 --- a/dev/api/metrics/RollingROCAUC/index.html +++ b/dev/api/metrics/RollingROCAUC/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/SMAPE/index.html b/dev/api/metrics/SMAPE/index.html index 03d11473ae..637670ac82 100644 --- a/dev/api/metrics/SMAPE/index.html +++ b/dev/api/metrics/SMAPE/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/Silhouette/index.html b/dev/api/metrics/Silhouette/index.html index af1c3d9924..d2e4cebe6c 100644 --- a/dev/api/metrics/Silhouette/index.html +++ b/dev/api/metrics/Silhouette/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/VBeta/index.html b/dev/api/metrics/VBeta/index.html index e241f1ecdc..893652daed 100644 --- a/dev/api/metrics/VBeta/index.html +++ b/dev/api/metrics/VBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/WeightedF1/index.html b/dev/api/metrics/WeightedF1/index.html index ddafc62d31..40c0190409 100644 --- a/dev/api/metrics/WeightedF1/index.html +++ b/dev/api/metrics/WeightedF1/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/WeightedFBeta/index.html b/dev/api/metrics/WeightedFBeta/index.html index eade339651..9e5e591727 100644 --- a/dev/api/metrics/WeightedFBeta/index.html +++ b/dev/api/metrics/WeightedFBeta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/WeightedJaccard/index.html b/dev/api/metrics/WeightedJaccard/index.html index b94ee617f9..ee716fd091 100644 --- a/dev/api/metrics/WeightedJaccard/index.html +++ b/dev/api/metrics/WeightedJaccard/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/WeightedPrecision/index.html b/dev/api/metrics/WeightedPrecision/index.html index bfc06fbf25..2928558caf 100644 --- a/dev/api/metrics/WeightedPrecision/index.html +++ b/dev/api/metrics/WeightedPrecision/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/WeightedRecall/index.html b/dev/api/metrics/WeightedRecall/index.html index 11162c2d8e..993ecc7d3b 100644 --- a/dev/api/metrics/WeightedRecall/index.html +++ b/dev/api/metrics/WeightedRecall/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/BinaryMetric/index.html b/dev/api/metrics/base/BinaryMetric/index.html index 2b9cbb05df..b075727192 100644 --- a/dev/api/metrics/base/BinaryMetric/index.html +++ b/dev/api/metrics/base/BinaryMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/ClassificationMetric/index.html b/dev/api/metrics/base/ClassificationMetric/index.html index 01f751281f..5a33b353b5 100644 --- a/dev/api/metrics/base/ClassificationMetric/index.html +++ b/dev/api/metrics/base/ClassificationMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/Metric/index.html b/dev/api/metrics/base/Metric/index.html index 81ab9b4516..3ce57552bb 100644 --- a/dev/api/metrics/base/Metric/index.html +++ b/dev/api/metrics/base/Metric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/Metrics/index.html b/dev/api/metrics/base/Metrics/index.html index d096a90ae1..99a92a784f 100644 --- a/dev/api/metrics/base/Metrics/index.html +++ b/dev/api/metrics/base/Metrics/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/MultiClassMetric/index.html b/dev/api/metrics/base/MultiClassMetric/index.html index 040134624f..b06bc7e706 100644 --- a/dev/api/metrics/base/MultiClassMetric/index.html +++ b/dev/api/metrics/base/MultiClassMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/RegressionMetric/index.html b/dev/api/metrics/base/RegressionMetric/index.html index 5a892ad4df..9b3ce5b8c5 100644 --- 
a/dev/api/metrics/base/RegressionMetric/index.html +++ b/dev/api/metrics/base/RegressionMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/base/WrapperMetric/index.html b/dev/api/metrics/base/WrapperMetric/index.html index a62b988baf..4d2d43c612 100644 --- a/dev/api/metrics/base/WrapperMetric/index.html +++ b/dev/api/metrics/base/WrapperMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/ExactMatch/index.html b/dev/api/metrics/multioutput/ExactMatch/index.html index 33580d8b61..0a8985194d 100644 --- a/dev/api/metrics/multioutput/ExactMatch/index.html +++ b/dev/api/metrics/multioutput/ExactMatch/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/MacroAverage/index.html b/dev/api/metrics/multioutput/MacroAverage/index.html index f5af8bdd61..91cbcd48bf 100644 --- a/dev/api/metrics/multioutput/MacroAverage/index.html +++ b/dev/api/metrics/multioutput/MacroAverage/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/MicroAverage/index.html b/dev/api/metrics/multioutput/MicroAverage/index.html index 2a3482353f..8675102a0d 100644 --- a/dev/api/metrics/multioutput/MicroAverage/index.html +++ b/dev/api/metrics/multioutput/MicroAverage/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/MultiLabelConfusionMatrix/index.html b/dev/api/metrics/multioutput/MultiLabelConfusionMatrix/index.html index 0d6722406b..013b5febf9 100644 --- a/dev/api/metrics/multioutput/MultiLabelConfusionMatrix/index.html +++ b/dev/api/metrics/multioutput/MultiLabelConfusionMatrix/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/PerOutput/index.html b/dev/api/metrics/multioutput/PerOutput/index.html index c6a63911d6..a93a5421b8 100644 --- a/dev/api/metrics/multioutput/PerOutput/index.html +++ b/dev/api/metrics/multioutput/PerOutput/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/SampleAverage/index.html b/dev/api/metrics/multioutput/SampleAverage/index.html index 512837c47b..b28c97a219 100644 --- a/dev/api/metrics/multioutput/SampleAverage/index.html +++ b/dev/api/metrics/multioutput/SampleAverage/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/base/MultiOutputClassificationMetric/index.html b/dev/api/metrics/multioutput/base/MultiOutputClassificationMetric/index.html index b8c069eb6e..f3d810ec7b 100644 --- a/dev/api/metrics/multioutput/base/MultiOutputClassificationMetric/index.html +++ b/dev/api/metrics/multioutput/base/MultiOutputClassificationMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/metrics/multioutput/base/MultiOutputRegressionMetric/index.html b/dev/api/metrics/multioutput/base/MultiOutputRegressionMetric/index.html index 64a51c61f6..ba10ecce01 100644 --- a/dev/api/metrics/multioutput/base/MultiOutputRegressionMetric/index.html +++ b/dev/api/metrics/multioutput/base/MultiOutputRegressionMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/misc/SDFT/index.html b/dev/api/misc/SDFT/index.html index b6773db6c1..a25e6ef378 100644 --- a/dev/api/misc/SDFT/index.html +++ b/dev/api/misc/SDFT/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/misc/Skyline/index.html b/dev/api/misc/Skyline/index.html index 720d57de20..565d312622 100644 --- a/dev/api/misc/Skyline/index.html +++ b/dev/api/misc/Skyline/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/BanditClassifier/index.html b/dev/api/model-selection/BanditClassifier/index.html index 502f485342..0710af8923 100644 --- 
a/dev/api/model-selection/BanditClassifier/index.html +++ b/dev/api/model-selection/BanditClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/BanditRegressor/index.html b/dev/api/model-selection/BanditRegressor/index.html index 06ee4d5fa4..ab8e19cac1 100644 --- a/dev/api/model-selection/BanditRegressor/index.html +++ b/dev/api/model-selection/BanditRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/GreedyRegressor/index.html b/dev/api/model-selection/GreedyRegressor/index.html index 55cf9d5dc2..9a962d0312 100644 --- a/dev/api/model-selection/GreedyRegressor/index.html +++ b/dev/api/model-selection/GreedyRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/SuccessiveHalvingClassifier/index.html b/dev/api/model-selection/SuccessiveHalvingClassifier/index.html index a14f6ca5df..26f2c14eed 100644 --- a/dev/api/model-selection/SuccessiveHalvingClassifier/index.html +++ b/dev/api/model-selection/SuccessiveHalvingClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/SuccessiveHalvingRegressor/index.html b/dev/api/model-selection/SuccessiveHalvingRegressor/index.html index ec848e806a..963d57ebf8 100644 --- a/dev/api/model-selection/SuccessiveHalvingRegressor/index.html +++ b/dev/api/model-selection/SuccessiveHalvingRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/base/ModelSelectionClassifier/index.html b/dev/api/model-selection/base/ModelSelectionClassifier/index.html index ccca6b1071..5bf5139734 100644 --- a/dev/api/model-selection/base/ModelSelectionClassifier/index.html +++ b/dev/api/model-selection/base/ModelSelectionClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/model-selection/base/ModelSelectionRegressor/index.html b/dev/api/model-selection/base/ModelSelectionRegressor/index.html index 68a0ada6c1..819488ede5 100644 --- a/dev/api/model-selection/base/ModelSelectionRegressor/index.html +++ b/dev/api/model-selection/base/ModelSelectionRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multiclass/OneVsOneClassifier/index.html b/dev/api/multiclass/OneVsOneClassifier/index.html index d2fa626808..0205f89c41 100644 --- a/dev/api/multiclass/OneVsOneClassifier/index.html +++ b/dev/api/multiclass/OneVsOneClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multiclass/OneVsRestClassifier/index.html b/dev/api/multiclass/OneVsRestClassifier/index.html index dc7d06bd20..6edd8b6e47 100644 --- a/dev/api/multiclass/OneVsRestClassifier/index.html +++ b/dev/api/multiclass/OneVsRestClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multiclass/OutputCodeClassifier/index.html b/dev/api/multiclass/OutputCodeClassifier/index.html index 0e43b1fd4d..188d393da1 100644 --- a/dev/api/multiclass/OutputCodeClassifier/index.html +++ b/dev/api/multiclass/OutputCodeClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multioutput/ClassifierChain/index.html b/dev/api/multioutput/ClassifierChain/index.html index 2f6a4230c5..69fceb349e 100644 --- a/dev/api/multioutput/ClassifierChain/index.html +++ b/dev/api/multioutput/ClassifierChain/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multioutput/MonteCarloClassifierChain/index.html b/dev/api/multioutput/MonteCarloClassifierChain/index.html index 754e5c91b9..e5a0bffb46 100644 --- a/dev/api/multioutput/MonteCarloClassifierChain/index.html +++ b/dev/api/multioutput/MonteCarloClassifierChain/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/dev/api/multioutput/MultiClassEncoder/index.html b/dev/api/multioutput/MultiClassEncoder/index.html index ab076b409a..2f93485d94 100644 --- a/dev/api/multioutput/MultiClassEncoder/index.html +++ b/dev/api/multioutput/MultiClassEncoder/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multioutput/ProbabilisticClassifierChain/index.html b/dev/api/multioutput/ProbabilisticClassifierChain/index.html index e30cc10dd7..4acec6c5ba 100644 --- a/dev/api/multioutput/ProbabilisticClassifierChain/index.html +++ b/dev/api/multioutput/ProbabilisticClassifierChain/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/multioutput/RegressorChain/index.html b/dev/api/multioutput/RegressorChain/index.html index 645ef02749..6fb28a8a89 100644 --- a/dev/api/multioutput/RegressorChain/index.html +++ b/dev/api/multioutput/RegressorChain/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/naive-bayes/BernoulliNB/index.html b/dev/api/naive-bayes/BernoulliNB/index.html index 08afa09e6a..90af79c5fb 100644 --- a/dev/api/naive-bayes/BernoulliNB/index.html +++ b/dev/api/naive-bayes/BernoulliNB/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/naive-bayes/ComplementNB/index.html b/dev/api/naive-bayes/ComplementNB/index.html index dfbc46aacb..d1f7c8c8f2 100644 --- a/dev/api/naive-bayes/ComplementNB/index.html +++ b/dev/api/naive-bayes/ComplementNB/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/naive-bayes/GaussianNB/index.html b/dev/api/naive-bayes/GaussianNB/index.html index 664e08bff8..bac8169736 100644 --- a/dev/api/naive-bayes/GaussianNB/index.html +++ b/dev/api/naive-bayes/GaussianNB/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/naive-bayes/MultinomialNB/index.html b/dev/api/naive-bayes/MultinomialNB/index.html index aa358f88cf..78f803d7aa 100644 --- a/dev/api/naive-bayes/MultinomialNB/index.html +++ b/dev/api/naive-bayes/MultinomialNB/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neighbors/KNNClassifier/index.html b/dev/api/neighbors/KNNClassifier/index.html index b528d852a6..29b88149dc 100644 --- a/dev/api/neighbors/KNNClassifier/index.html +++ b/dev/api/neighbors/KNNClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neighbors/KNNRegressor/index.html b/dev/api/neighbors/KNNRegressor/index.html index 7695b5b12e..626add2c9f 100644 --- a/dev/api/neighbors/KNNRegressor/index.html +++ b/dev/api/neighbors/KNNRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neighbors/LazySearch/index.html b/dev/api/neighbors/LazySearch/index.html index 558bd04f26..de593bd9ef 100644 --- a/dev/api/neighbors/LazySearch/index.html +++ b/dev/api/neighbors/LazySearch/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neighbors/SWINN/index.html b/dev/api/neighbors/SWINN/index.html index 1fbd75b838..3e9d30fb0d 100644 --- a/dev/api/neighbors/SWINN/index.html +++ b/dev/api/neighbors/SWINN/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neural-net/MLPRegressor/index.html b/dev/api/neural-net/MLPRegressor/index.html index 0b6d1034b4..dae6465bf8 100644 --- a/dev/api/neural-net/MLPRegressor/index.html +++ b/dev/api/neural-net/MLPRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neural-net/activations/Identity/index.html b/dev/api/neural-net/activations/Identity/index.html index 0a4adbb2dd..8794499426 100644 --- a/dev/api/neural-net/activations/Identity/index.html +++ b/dev/api/neural-net/activations/Identity/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neural-net/activations/ReLU/index.html b/dev/api/neural-net/activations/ReLU/index.html index 
f7c9b0f67c..5593f44f0c 100644 --- a/dev/api/neural-net/activations/ReLU/index.html +++ b/dev/api/neural-net/activations/ReLU/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/neural-net/activations/Sigmoid/index.html b/dev/api/neural-net/activations/Sigmoid/index.html index abbd00b696..a21fc03f32 100644 --- a/dev/api/neural-net/activations/Sigmoid/index.html +++ b/dev/api/neural-net/activations/Sigmoid/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/AMSGrad/index.html b/dev/api/optim/AMSGrad/index.html index d8274c7c94..98d9cbc519 100644 --- a/dev/api/optim/AMSGrad/index.html +++ b/dev/api/optim/AMSGrad/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/AdaBound/index.html b/dev/api/optim/AdaBound/index.html index aefc459c47..321c110ca2 100644 --- a/dev/api/optim/AdaBound/index.html +++ b/dev/api/optim/AdaBound/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/AdaDelta/index.html b/dev/api/optim/AdaDelta/index.html index d75a2516b4..1b36d59692 100644 --- a/dev/api/optim/AdaDelta/index.html +++ b/dev/api/optim/AdaDelta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/AdaGrad/index.html b/dev/api/optim/AdaGrad/index.html index 44dcf884ec..75a389c6a8 100644 --- a/dev/api/optim/AdaGrad/index.html +++ b/dev/api/optim/AdaGrad/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/AdaMax/index.html b/dev/api/optim/AdaMax/index.html index 30dddf8e21..e5f4867e8f 100644 --- a/dev/api/optim/AdaMax/index.html +++ b/dev/api/optim/AdaMax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/Adam/index.html b/dev/api/optim/Adam/index.html index bb6229cbfa..e48e9c5c1e 100644 --- a/dev/api/optim/Adam/index.html +++ b/dev/api/optim/Adam/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/Averager/index.html b/dev/api/optim/Averager/index.html index 0102b4cba9..002034fb1a 100644 --- a/dev/api/optim/Averager/index.html +++ b/dev/api/optim/Averager/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/FTRLProximal/index.html b/dev/api/optim/FTRLProximal/index.html index 9bcee764cd..bb30f53fca 100644 --- a/dev/api/optim/FTRLProximal/index.html +++ b/dev/api/optim/FTRLProximal/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/Momentum/index.html b/dev/api/optim/Momentum/index.html index 19e420951b..16d35346ad 100644 --- a/dev/api/optim/Momentum/index.html +++ b/dev/api/optim/Momentum/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/Nadam/index.html b/dev/api/optim/Nadam/index.html index b7c718ecba..11bcbb4f9c 100644 --- a/dev/api/optim/Nadam/index.html +++ b/dev/api/optim/Nadam/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/NesterovMomentum/index.html b/dev/api/optim/NesterovMomentum/index.html index 80e17f775f..49f5804bae 100644 --- a/dev/api/optim/NesterovMomentum/index.html +++ b/dev/api/optim/NesterovMomentum/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/RMSProp/index.html b/dev/api/optim/RMSProp/index.html index 10956c0c29..d4bfbb7ed5 100644 --- a/dev/api/optim/RMSProp/index.html +++ b/dev/api/optim/RMSProp/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/SGD/index.html b/dev/api/optim/SGD/index.html index 1ba1b5fb0f..8c212dfa23 100644 --- a/dev/api/optim/SGD/index.html +++ b/dev/api/optim/SGD/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/base/Initializer/index.html b/dev/api/optim/base/Initializer/index.html index fcaa486c7f..b9df5013a7 100644 --- a/dev/api/optim/base/Initializer/index.html +++ b/dev/api/optim/base/Initializer/index.html @@ -22,7 +22,7 @@ - + diff 
--git a/dev/api/optim/base/Loss/index.html b/dev/api/optim/base/Loss/index.html index 11ecf0c104..28a5253bc6 100644 --- a/dev/api/optim/base/Loss/index.html +++ b/dev/api/optim/base/Loss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/base/Optimizer/index.html b/dev/api/optim/base/Optimizer/index.html index d0fac659ff..fe1a79b636 100644 --- a/dev/api/optim/base/Optimizer/index.html +++ b/dev/api/optim/base/Optimizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/base/Scheduler/index.html b/dev/api/optim/base/Scheduler/index.html index af9393b85d..dc451ab57f 100644 --- a/dev/api/optim/base/Scheduler/index.html +++ b/dev/api/optim/base/Scheduler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/initializers/Constant/index.html b/dev/api/optim/initializers/Constant/index.html index 2dea67f6a6..2220b08902 100644 --- a/dev/api/optim/initializers/Constant/index.html +++ b/dev/api/optim/initializers/Constant/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/initializers/Normal/index.html b/dev/api/optim/initializers/Normal/index.html index 3038a00d20..421e685116 100644 --- a/dev/api/optim/initializers/Normal/index.html +++ b/dev/api/optim/initializers/Normal/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/initializers/Zeros/index.html b/dev/api/optim/initializers/Zeros/index.html index f02c6cc1cd..0a646e9b06 100644 --- a/dev/api/optim/initializers/Zeros/index.html +++ b/dev/api/optim/initializers/Zeros/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Absolute/index.html b/dev/api/optim/losses/Absolute/index.html index a8f8bb2dd0..c4af23ad94 100644 --- a/dev/api/optim/losses/Absolute/index.html +++ b/dev/api/optim/losses/Absolute/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/BinaryFocalLoss/index.html b/dev/api/optim/losses/BinaryFocalLoss/index.html index 9b7fb59e90..c3f29284e1 100644 --- a/dev/api/optim/losses/BinaryFocalLoss/index.html +++ b/dev/api/optim/losses/BinaryFocalLoss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/BinaryLoss/index.html b/dev/api/optim/losses/BinaryLoss/index.html index 7aecec8395..7213ae1662 100644 --- a/dev/api/optim/losses/BinaryLoss/index.html +++ b/dev/api/optim/losses/BinaryLoss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Cauchy/index.html b/dev/api/optim/losses/Cauchy/index.html index f4e4c4e99b..e5beb98486 100644 --- a/dev/api/optim/losses/Cauchy/index.html +++ b/dev/api/optim/losses/Cauchy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/CrossEntropy/index.html b/dev/api/optim/losses/CrossEntropy/index.html index 58f569ca67..1ea147fb11 100644 --- a/dev/api/optim/losses/CrossEntropy/index.html +++ b/dev/api/optim/losses/CrossEntropy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/EpsilonInsensitiveHinge/index.html b/dev/api/optim/losses/EpsilonInsensitiveHinge/index.html index 77e89ae96b..445f730e09 100644 --- a/dev/api/optim/losses/EpsilonInsensitiveHinge/index.html +++ b/dev/api/optim/losses/EpsilonInsensitiveHinge/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Hinge/index.html b/dev/api/optim/losses/Hinge/index.html index de2e6308f4..6b8a1006af 100644 --- a/dev/api/optim/losses/Hinge/index.html +++ b/dev/api/optim/losses/Hinge/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Huber/index.html b/dev/api/optim/losses/Huber/index.html index af280d0f16..d4296d7d6c 100644 --- a/dev/api/optim/losses/Huber/index.html +++ 
b/dev/api/optim/losses/Huber/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Log/index.html b/dev/api/optim/losses/Log/index.html index 7ab6a86986..3e86bbc5e4 100644 --- a/dev/api/optim/losses/Log/index.html +++ b/dev/api/optim/losses/Log/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/MultiClassLoss/index.html b/dev/api/optim/losses/MultiClassLoss/index.html index 8c13962119..b52387c94c 100644 --- a/dev/api/optim/losses/MultiClassLoss/index.html +++ b/dev/api/optim/losses/MultiClassLoss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Poisson/index.html b/dev/api/optim/losses/Poisson/index.html index 13a09bbe5e..bb8ca754e3 100644 --- a/dev/api/optim/losses/Poisson/index.html +++ b/dev/api/optim/losses/Poisson/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Quantile/index.html b/dev/api/optim/losses/Quantile/index.html index ff196ff6ba..75bcef1e18 100644 --- a/dev/api/optim/losses/Quantile/index.html +++ b/dev/api/optim/losses/Quantile/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/RegressionLoss/index.html b/dev/api/optim/losses/RegressionLoss/index.html index 27a6bdb459..32a489eec3 100644 --- a/dev/api/optim/losses/RegressionLoss/index.html +++ b/dev/api/optim/losses/RegressionLoss/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/losses/Squared/index.html b/dev/api/optim/losses/Squared/index.html index 140e3c50db..584c1514c8 100644 --- a/dev/api/optim/losses/Squared/index.html +++ b/dev/api/optim/losses/Squared/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/schedulers/Constant/index.html b/dev/api/optim/schedulers/Constant/index.html index 3d17544f07..b4413a8d93 100644 --- a/dev/api/optim/schedulers/Constant/index.html +++ b/dev/api/optim/schedulers/Constant/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/schedulers/InverseScaling/index.html b/dev/api/optim/schedulers/InverseScaling/index.html index a466d7e90a..158c8e6b81 100644 --- a/dev/api/optim/schedulers/InverseScaling/index.html +++ b/dev/api/optim/schedulers/InverseScaling/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/optim/schedulers/Optimal/index.html b/dev/api/optim/schedulers/Optimal/index.html index f182650840..7b65f4b408 100644 --- a/dev/api/optim/schedulers/Optimal/index.html +++ b/dev/api/optim/schedulers/Optimal/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/overview/index.html b/dev/api/overview/index.html index 43976a79d6..3f990c1175 100644 --- a/dev/api/overview/index.html +++ b/dev/api/overview/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/AdaptiveStandardScaler/index.html b/dev/api/preprocessing/AdaptiveStandardScaler/index.html index dee3772a8c..9cc995f748 100644 --- a/dev/api/preprocessing/AdaptiveStandardScaler/index.html +++ b/dev/api/preprocessing/AdaptiveStandardScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/Binarizer/index.html b/dev/api/preprocessing/Binarizer/index.html index 6d78b2b032..7442cd6664 100644 --- a/dev/api/preprocessing/Binarizer/index.html +++ b/dev/api/preprocessing/Binarizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/FeatureHasher/index.html b/dev/api/preprocessing/FeatureHasher/index.html index 212f4f3c4a..8acbb41ae1 100644 --- a/dev/api/preprocessing/FeatureHasher/index.html +++ b/dev/api/preprocessing/FeatureHasher/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/GaussianRandomProjector/index.html 
b/dev/api/preprocessing/GaussianRandomProjector/index.html index 656233232a..68ee7d1197 100644 --- a/dev/api/preprocessing/GaussianRandomProjector/index.html +++ b/dev/api/preprocessing/GaussianRandomProjector/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/LDA/index.html b/dev/api/preprocessing/LDA/index.html index ad119f462b..239420d80c 100644 --- a/dev/api/preprocessing/LDA/index.html +++ b/dev/api/preprocessing/LDA/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/MaxAbsScaler/index.html b/dev/api/preprocessing/MaxAbsScaler/index.html index 9b8bb0ac12..2b54152d67 100644 --- a/dev/api/preprocessing/MaxAbsScaler/index.html +++ b/dev/api/preprocessing/MaxAbsScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/MinMaxScaler/index.html b/dev/api/preprocessing/MinMaxScaler/index.html index 12b952e975..03946dcec3 100644 --- a/dev/api/preprocessing/MinMaxScaler/index.html +++ b/dev/api/preprocessing/MinMaxScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/Normalizer/index.html b/dev/api/preprocessing/Normalizer/index.html index 16aafee496..5279ec2c87 100644 --- a/dev/api/preprocessing/Normalizer/index.html +++ b/dev/api/preprocessing/Normalizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/OneHotEncoder/index.html b/dev/api/preprocessing/OneHotEncoder/index.html index 794a464ce4..6e2fd0ba9f 100644 --- a/dev/api/preprocessing/OneHotEncoder/index.html +++ b/dev/api/preprocessing/OneHotEncoder/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/OrdinalEncoder/index.html b/dev/api/preprocessing/OrdinalEncoder/index.html index 93d5de6720..c4378c1547 100644 --- a/dev/api/preprocessing/OrdinalEncoder/index.html +++ b/dev/api/preprocessing/OrdinalEncoder/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/PredClipper/index.html b/dev/api/preprocessing/PredClipper/index.html index 1469f9e142..49e5ad9d11 100644 --- a/dev/api/preprocessing/PredClipper/index.html +++ b/dev/api/preprocessing/PredClipper/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/PreviousImputer/index.html b/dev/api/preprocessing/PreviousImputer/index.html index e1f6837909..0d21dfc8a6 100644 --- a/dev/api/preprocessing/PreviousImputer/index.html +++ b/dev/api/preprocessing/PreviousImputer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/RobustScaler/index.html b/dev/api/preprocessing/RobustScaler/index.html index 8a79eeb396..ce4eacf6da 100644 --- a/dev/api/preprocessing/RobustScaler/index.html +++ b/dev/api/preprocessing/RobustScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/SparseRandomProjector/index.html b/dev/api/preprocessing/SparseRandomProjector/index.html index 7c66ccd5e4..96fd25d41d 100644 --- a/dev/api/preprocessing/SparseRandomProjector/index.html +++ b/dev/api/preprocessing/SparseRandomProjector/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/StandardScaler/index.html b/dev/api/preprocessing/StandardScaler/index.html index d109dbf3ee..a6f8df9eab 100644 --- a/dev/api/preprocessing/StandardScaler/index.html +++ b/dev/api/preprocessing/StandardScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/StatImputer/index.html b/dev/api/preprocessing/StatImputer/index.html index 74aa4da8d5..a107082729 100644 --- a/dev/api/preprocessing/StatImputer/index.html +++ b/dev/api/preprocessing/StatImputer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/TargetMinMaxScaler/index.html 
b/dev/api/preprocessing/TargetMinMaxScaler/index.html index 7471adc32f..d1f5436487 100644 --- a/dev/api/preprocessing/TargetMinMaxScaler/index.html +++ b/dev/api/preprocessing/TargetMinMaxScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/preprocessing/TargetStandardScaler/index.html b/dev/api/preprocessing/TargetStandardScaler/index.html index 893532db19..155ad20e7c 100644 --- a/dev/api/preprocessing/TargetStandardScaler/index.html +++ b/dev/api/preprocessing/TargetStandardScaler/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/Beta/index.html b/dev/api/proba/Beta/index.html index d70ad6d207..5e604345d7 100644 --- a/dev/api/proba/Beta/index.html +++ b/dev/api/proba/Beta/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/Gaussian/index.html b/dev/api/proba/Gaussian/index.html index 1eea12f95e..3a4153b0b7 100644 --- a/dev/api/proba/Gaussian/index.html +++ b/dev/api/proba/Gaussian/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/Multinomial/index.html b/dev/api/proba/Multinomial/index.html index 8dfe73fd5b..9e9680c0c9 100644 --- a/dev/api/proba/Multinomial/index.html +++ b/dev/api/proba/Multinomial/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/MultivariateGaussian/index.html b/dev/api/proba/MultivariateGaussian/index.html index b7832c37aa..bade54eda9 100644 --- a/dev/api/proba/MultivariateGaussian/index.html +++ b/dev/api/proba/MultivariateGaussian/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/base/BinaryDistribution/index.html b/dev/api/proba/base/BinaryDistribution/index.html index 74bfa0a83e..ee3a1f4b50 100644 --- a/dev/api/proba/base/BinaryDistribution/index.html +++ b/dev/api/proba/base/BinaryDistribution/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/base/ContinuousDistribution/index.html b/dev/api/proba/base/ContinuousDistribution/index.html index 661c49d63e..9814a21624 100644 --- a/dev/api/proba/base/ContinuousDistribution/index.html +++ b/dev/api/proba/base/ContinuousDistribution/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/base/DiscreteDistribution/index.html b/dev/api/proba/base/DiscreteDistribution/index.html index f5a0a17f8f..a553f88601 100644 --- a/dev/api/proba/base/DiscreteDistribution/index.html +++ b/dev/api/proba/base/DiscreteDistribution/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/proba/base/Distribution/index.html b/dev/api/proba/base/Distribution/index.html index b070826dda..944ed4f63b 100644 --- a/dev/api/proba/base/Distribution/index.html +++ b/dev/api/proba/base/Distribution/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/reco/Baseline/index.html b/dev/api/reco/Baseline/index.html index 9ec854ab7a..a3c94fbc6b 100644 --- a/dev/api/reco/Baseline/index.html +++ b/dev/api/reco/Baseline/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/reco/BiasedMF/index.html b/dev/api/reco/BiasedMF/index.html index a90bf5c18b..567eecb88d 100644 --- a/dev/api/reco/BiasedMF/index.html +++ b/dev/api/reco/BiasedMF/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/reco/FunkMF/index.html b/dev/api/reco/FunkMF/index.html index c526b0e54c..df4a9870af 100644 --- a/dev/api/reco/FunkMF/index.html +++ b/dev/api/reco/FunkMF/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/reco/RandomNormal/index.html b/dev/api/reco/RandomNormal/index.html index b6545f6566..71ca600bcc 100644 --- a/dev/api/reco/RandomNormal/index.html +++ b/dev/api/reco/RandomNormal/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/reco/base/Ranker/index.html b/dev/api/reco/base/Ranker/index.html index 
b4307b0e89..f9b40e588e 100644 --- a/dev/api/reco/base/Ranker/index.html +++ b/dev/api/reco/base/Ranker/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/rules/AMRules/index.html b/dev/api/rules/AMRules/index.html index a3af0f960c..59c32cf6dc 100644 --- a/dev/api/rules/AMRules/index.html +++ b/dev/api/rules/AMRules/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/sketch/Counter/index.html b/dev/api/sketch/Counter/index.html index 828edf8059..8faded019a 100644 --- a/dev/api/sketch/Counter/index.html +++ b/dev/api/sketch/Counter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/sketch/HeavyHitters/index.html b/dev/api/sketch/HeavyHitters/index.html index 09d88e5a46..f34c4eed34 100644 --- a/dev/api/sketch/HeavyHitters/index.html +++ b/dev/api/sketch/HeavyHitters/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/sketch/Histogram/index.html b/dev/api/sketch/Histogram/index.html index 06e1088ab0..7e4fdd425b 100644 --- a/dev/api/sketch/Histogram/index.html +++ b/dev/api/sketch/Histogram/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/sketch/Set/index.html b/dev/api/sketch/Set/index.html index abe6023c4c..a84c706d28 100644 --- a/dev/api/sketch/Set/index.html +++ b/dev/api/sketch/Set/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/AbsMax/index.html b/dev/api/stats/AbsMax/index.html index 4495dfb0cc..5190171ef4 100644 --- a/dev/api/stats/AbsMax/index.html +++ b/dev/api/stats/AbsMax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/AutoCorr/index.html b/dev/api/stats/AutoCorr/index.html index 9b025ce532..34553ab6d6 100644 --- a/dev/api/stats/AutoCorr/index.html +++ b/dev/api/stats/AutoCorr/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/BayesianMean/index.html b/dev/api/stats/BayesianMean/index.html index 3def0e553c..bf795a2081 100644 --- a/dev/api/stats/BayesianMean/index.html +++ b/dev/api/stats/BayesianMean/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Count/index.html b/dev/api/stats/Count/index.html index 2ffe02d563..d3c70e7868 100644 --- a/dev/api/stats/Count/index.html +++ b/dev/api/stats/Count/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Cov/index.html b/dev/api/stats/Cov/index.html index e38f5e5897..4d81fdaa8f 100644 --- a/dev/api/stats/Cov/index.html +++ b/dev/api/stats/Cov/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/EWMean/index.html b/dev/api/stats/EWMean/index.html index 97b69a0acd..81b496f8a6 100644 --- a/dev/api/stats/EWMean/index.html +++ b/dev/api/stats/EWMean/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/EWVar/index.html b/dev/api/stats/EWVar/index.html index 80b09791f9..2ec21d1f2c 100644 --- a/dev/api/stats/EWVar/index.html +++ b/dev/api/stats/EWVar/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Entropy/index.html b/dev/api/stats/Entropy/index.html index b62407c05e..f148155489 100644 --- a/dev/api/stats/Entropy/index.html +++ b/dev/api/stats/Entropy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/IQR/index.html b/dev/api/stats/IQR/index.html index 4841a7abe8..42fe8d8310 100644 --- a/dev/api/stats/IQR/index.html +++ b/dev/api/stats/IQR/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Kurtosis/index.html b/dev/api/stats/Kurtosis/index.html index bbf7af5de7..aa4477e2da 100644 --- a/dev/api/stats/Kurtosis/index.html +++ b/dev/api/stats/Kurtosis/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Link/index.html b/dev/api/stats/Link/index.html index 7e3c0548e6..e95bd134a2 100644 --- a/dev/api/stats/Link/index.html +++ 
b/dev/api/stats/Link/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/MAD/index.html b/dev/api/stats/MAD/index.html index 2c24fe237a..9ae0713d5a 100644 --- a/dev/api/stats/MAD/index.html +++ b/dev/api/stats/MAD/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Max/index.html b/dev/api/stats/Max/index.html index 4d6f50dd82..83e3578b72 100644 --- a/dev/api/stats/Max/index.html +++ b/dev/api/stats/Max/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Mean/index.html b/dev/api/stats/Mean/index.html index a7775be781..7abdc8e7d5 100644 --- a/dev/api/stats/Mean/index.html +++ b/dev/api/stats/Mean/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Min/index.html b/dev/api/stats/Min/index.html index d611df4267..088e5d971d 100644 --- a/dev/api/stats/Min/index.html +++ b/dev/api/stats/Min/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Mode/index.html b/dev/api/stats/Mode/index.html index e348998e3e..2b43aaeae5 100644 --- a/dev/api/stats/Mode/index.html +++ b/dev/api/stats/Mode/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/NUnique/index.html b/dev/api/stats/NUnique/index.html index b6eb10b0ef..c5096c227f 100644 --- a/dev/api/stats/NUnique/index.html +++ b/dev/api/stats/NUnique/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/PeakToPeak/index.html b/dev/api/stats/PeakToPeak/index.html index 4f6ed8a3d9..1c49cecc7e 100644 --- a/dev/api/stats/PeakToPeak/index.html +++ b/dev/api/stats/PeakToPeak/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/PearsonCorr/index.html b/dev/api/stats/PearsonCorr/index.html index dfaeedf0be..86abdbe112 100644 --- a/dev/api/stats/PearsonCorr/index.html +++ b/dev/api/stats/PearsonCorr/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Quantile/index.html b/dev/api/stats/Quantile/index.html index 2b5db868c9..b066f7b2f3 100644 --- a/dev/api/stats/Quantile/index.html +++ b/dev/api/stats/Quantile/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingAbsMax/index.html b/dev/api/stats/RollingAbsMax/index.html index afe4e419d3..d351ceede9 100644 --- a/dev/api/stats/RollingAbsMax/index.html +++ b/dev/api/stats/RollingAbsMax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingIQR/index.html b/dev/api/stats/RollingIQR/index.html index f8f2f9be17..661f9722e5 100644 --- a/dev/api/stats/RollingIQR/index.html +++ b/dev/api/stats/RollingIQR/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingMax/index.html b/dev/api/stats/RollingMax/index.html index 92fcf2f089..bf65bcb334 100644 --- a/dev/api/stats/RollingMax/index.html +++ b/dev/api/stats/RollingMax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingMin/index.html b/dev/api/stats/RollingMin/index.html index 987274c69e..54026ae3c4 100644 --- a/dev/api/stats/RollingMin/index.html +++ b/dev/api/stats/RollingMin/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingMode/index.html b/dev/api/stats/RollingMode/index.html index a060e8026d..d8a6271eec 100644 --- a/dev/api/stats/RollingMode/index.html +++ b/dev/api/stats/RollingMode/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingPeakToPeak/index.html b/dev/api/stats/RollingPeakToPeak/index.html index ca326a62f7..c62a0b19a8 100644 --- a/dev/api/stats/RollingPeakToPeak/index.html +++ b/dev/api/stats/RollingPeakToPeak/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/RollingQuantile/index.html b/dev/api/stats/RollingQuantile/index.html index fcd6b4fc52..e5ebbb3ba4 100644 --- 
a/dev/api/stats/RollingQuantile/index.html +++ b/dev/api/stats/RollingQuantile/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/SEM/index.html b/dev/api/stats/SEM/index.html index bdfa21be0f..26f8188a05 100644 --- a/dev/api/stats/SEM/index.html +++ b/dev/api/stats/SEM/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Shift/index.html b/dev/api/stats/Shift/index.html index c56ea1054a..156bbdc19f 100644 --- a/dev/api/stats/Shift/index.html +++ b/dev/api/stats/Shift/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Skew/index.html b/dev/api/stats/Skew/index.html index f8675db307..89a64c70fe 100644 --- a/dev/api/stats/Skew/index.html +++ b/dev/api/stats/Skew/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Sum/index.html b/dev/api/stats/Sum/index.html index 91c884f81d..7f399f7a3f 100644 --- a/dev/api/stats/Sum/index.html +++ b/dev/api/stats/Sum/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/Var/index.html b/dev/api/stats/Var/index.html index 54c62bb368..4843c362b8 100644 --- a/dev/api/stats/Var/index.html +++ b/dev/api/stats/Var/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/base/Bivariate/index.html b/dev/api/stats/base/Bivariate/index.html index 5fc9d97260..a68a2db0fe 100644 --- a/dev/api/stats/base/Bivariate/index.html +++ b/dev/api/stats/base/Bivariate/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stats/base/Univariate/index.html b/dev/api/stats/base/Univariate/index.html index 7e45547de9..a9eb306a6e 100644 --- a/dev/api/stats/base/Univariate/index.html +++ b/dev/api/stats/base/Univariate/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/Cache/index.html b/dev/api/stream/Cache/index.html index 814a7fc671..33de65fa1a 100644 --- a/dev/api/stream/Cache/index.html +++ b/dev/api/stream/Cache/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/TwitchChatStream/index.html b/dev/api/stream/TwitchChatStream/index.html index 7effcd8aab..fb5256a1cc 100644 --- a/dev/api/stream/TwitchChatStream/index.html +++ b/dev/api/stream/TwitchChatStream/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/TwitterLiveStream/index.html b/dev/api/stream/TwitterLiveStream/index.html index 5fd35d45d6..8881a5adbd 100644 --- a/dev/api/stream/TwitterLiveStream/index.html +++ b/dev/api/stream/TwitterLiveStream/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-arff/index.html b/dev/api/stream/iter-arff/index.html index 2ef6a94edd..85c174185a 100644 --- a/dev/api/stream/iter-arff/index.html +++ b/dev/api/stream/iter-arff/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-array/index.html b/dev/api/stream/iter-array/index.html index 71b20fabe8..da043678b5 100644 --- a/dev/api/stream/iter-array/index.html +++ b/dev/api/stream/iter-array/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-csv/index.html b/dev/api/stream/iter-csv/index.html index b5f0602075..f71aff0af9 100644 --- a/dev/api/stream/iter-csv/index.html +++ b/dev/api/stream/iter-csv/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-libsvm/index.html b/dev/api/stream/iter-libsvm/index.html index f8c09b6f8c..a1de3d3914 100644 --- a/dev/api/stream/iter-libsvm/index.html +++ b/dev/api/stream/iter-libsvm/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-pandas/index.html b/dev/api/stream/iter-pandas/index.html index ab66242a4a..cddeb4a64a 100644 --- a/dev/api/stream/iter-pandas/index.html +++ b/dev/api/stream/iter-pandas/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/dev/api/stream/iter-sklearn-dataset/index.html b/dev/api/stream/iter-sklearn-dataset/index.html index dcfecaef49..27aa18d6ca 100644 --- a/dev/api/stream/iter-sklearn-dataset/index.html +++ b/dev/api/stream/iter-sklearn-dataset/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/iter-sql/index.html b/dev/api/stream/iter-sql/index.html index 998b7db3fe..6c08d83655 100644 --- a/dev/api/stream/iter-sql/index.html +++ b/dev/api/stream/iter-sql/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/shuffle/index.html b/dev/api/stream/shuffle/index.html index dfa8f5cfe6..e0d75d3418 100644 --- a/dev/api/stream/shuffle/index.html +++ b/dev/api/stream/shuffle/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/stream/simulate-qa/index.html b/dev/api/stream/simulate-qa/index.html index d7687485b7..9ac880c17e 100644 --- a/dev/api/stream/simulate-qa/index.html +++ b/dev/api/stream/simulate-qa/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/ForecastingMetric/index.html b/dev/api/time-series/ForecastingMetric/index.html index 6863128268..c4229c2c67 100644 --- a/dev/api/time-series/ForecastingMetric/index.html +++ b/dev/api/time-series/ForecastingMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/HoltWinters/index.html b/dev/api/time-series/HoltWinters/index.html index 5ed2b0693c..4b0240ab36 100644 --- a/dev/api/time-series/HoltWinters/index.html +++ b/dev/api/time-series/HoltWinters/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/HorizonAggMetric/index.html b/dev/api/time-series/HorizonAggMetric/index.html index 03ba75d911..beb66b8912 100644 --- a/dev/api/time-series/HorizonAggMetric/index.html +++ b/dev/api/time-series/HorizonAggMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/HorizonMetric/index.html b/dev/api/time-series/HorizonMetric/index.html index 7199a66324..a82e01e2ea 100644 --- a/dev/api/time-series/HorizonMetric/index.html +++ b/dev/api/time-series/HorizonMetric/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/SNARIMAX/index.html b/dev/api/time-series/SNARIMAX/index.html index 3ea2d59edd..922ee396f9 100644 --- a/dev/api/time-series/SNARIMAX/index.html +++ b/dev/api/time-series/SNARIMAX/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/base/Forecaster/index.html b/dev/api/time-series/base/Forecaster/index.html index d4b536d7b4..e49b093d0d 100644 --- a/dev/api/time-series/base/Forecaster/index.html +++ b/dev/api/time-series/base/Forecaster/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/evaluate/index.html b/dev/api/time-series/evaluate/index.html index ff2a4db469..57e730df1b 100644 --- a/dev/api/time-series/evaluate/index.html +++ b/dev/api/time-series/evaluate/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/time-series/iter-evaluate/index.html b/dev/api/time-series/iter-evaluate/index.html index 1cf82bb886..b91cd99e47 100644 --- a/dev/api/time-series/iter-evaluate/index.html +++ b/dev/api/time-series/iter-evaluate/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/ExtremelyFastDecisionTreeClassifier/index.html b/dev/api/tree/ExtremelyFastDecisionTreeClassifier/index.html index 94370b8fb5..167d815e61 100644 --- a/dev/api/tree/ExtremelyFastDecisionTreeClassifier/index.html +++ b/dev/api/tree/ExtremelyFastDecisionTreeClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/HoeffdingAdaptiveTreeClassifier/index.html b/dev/api/tree/HoeffdingAdaptiveTreeClassifier/index.html index ae07126378..cb26bf024b 100644 --- 
a/dev/api/tree/HoeffdingAdaptiveTreeClassifier/index.html +++ b/dev/api/tree/HoeffdingAdaptiveTreeClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/HoeffdingAdaptiveTreeRegressor/index.html b/dev/api/tree/HoeffdingAdaptiveTreeRegressor/index.html index db1635ff6c..09a07f0969 100644 --- a/dev/api/tree/HoeffdingAdaptiveTreeRegressor/index.html +++ b/dev/api/tree/HoeffdingAdaptiveTreeRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/HoeffdingTreeClassifier/index.html b/dev/api/tree/HoeffdingTreeClassifier/index.html index 4fbebd6232..c50bf2a608 100644 --- a/dev/api/tree/HoeffdingTreeClassifier/index.html +++ b/dev/api/tree/HoeffdingTreeClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/HoeffdingTreeRegressor/index.html b/dev/api/tree/HoeffdingTreeRegressor/index.html index fa76a2872c..25916dd412 100644 --- a/dev/api/tree/HoeffdingTreeRegressor/index.html +++ b/dev/api/tree/HoeffdingTreeRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/SGTClassifier/index.html b/dev/api/tree/SGTClassifier/index.html index b4a7d1a494..2c9b2878ab 100644 --- a/dev/api/tree/SGTClassifier/index.html +++ b/dev/api/tree/SGTClassifier/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/SGTRegressor/index.html b/dev/api/tree/SGTRegressor/index.html index 972cf221c4..78ae13340f 100644 --- a/dev/api/tree/SGTRegressor/index.html +++ b/dev/api/tree/SGTRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/base/Branch/index.html b/dev/api/tree/base/Branch/index.html index a546803da6..099de1f66c 100644 --- a/dev/api/tree/base/Branch/index.html +++ b/dev/api/tree/base/Branch/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/base/Leaf/index.html b/dev/api/tree/base/Leaf/index.html index 05df14396f..b14ed1386d 100644 --- a/dev/api/tree/base/Leaf/index.html +++ b/dev/api/tree/base/Leaf/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/iSOUPTreeRegressor/index.html b/dev/api/tree/iSOUPTreeRegressor/index.html index 211eaa89fe..6f35ab2c95 100644 --- a/dev/api/tree/iSOUPTreeRegressor/index.html +++ b/dev/api/tree/iSOUPTreeRegressor/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/DynamicQuantizer/index.html b/dev/api/tree/splitter/DynamicQuantizer/index.html index 484e51ac50..8ad5254896 100644 --- a/dev/api/tree/splitter/DynamicQuantizer/index.html +++ b/dev/api/tree/splitter/DynamicQuantizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/EBSTSplitter/index.html b/dev/api/tree/splitter/EBSTSplitter/index.html index 25d463e17e..18380608fe 100644 --- a/dev/api/tree/splitter/EBSTSplitter/index.html +++ b/dev/api/tree/splitter/EBSTSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/ExhaustiveSplitter/index.html b/dev/api/tree/splitter/ExhaustiveSplitter/index.html index 373d403af2..0c9cb6eab9 100644 --- a/dev/api/tree/splitter/ExhaustiveSplitter/index.html +++ b/dev/api/tree/splitter/ExhaustiveSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/GaussianSplitter/index.html b/dev/api/tree/splitter/GaussianSplitter/index.html index 9a65e384a6..bdf64971e5 100644 --- a/dev/api/tree/splitter/GaussianSplitter/index.html +++ b/dev/api/tree/splitter/GaussianSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/HistogramSplitter/index.html b/dev/api/tree/splitter/HistogramSplitter/index.html index 60a0380285..e05280f10f 100644 --- a/dev/api/tree/splitter/HistogramSplitter/index.html +++ 
b/dev/api/tree/splitter/HistogramSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/QOSplitter/index.html b/dev/api/tree/splitter/QOSplitter/index.html index da93257eee..fedd2071e3 100644 --- a/dev/api/tree/splitter/QOSplitter/index.html +++ b/dev/api/tree/splitter/QOSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/Quantizer/index.html b/dev/api/tree/splitter/Quantizer/index.html index c8d21e1111..04a574abba 100644 --- a/dev/api/tree/splitter/Quantizer/index.html +++ b/dev/api/tree/splitter/Quantizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/Splitter/index.html b/dev/api/tree/splitter/Splitter/index.html index 70876bd17f..ec23fb5117 100644 --- a/dev/api/tree/splitter/Splitter/index.html +++ b/dev/api/tree/splitter/Splitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/StaticQuantizer/index.html b/dev/api/tree/splitter/StaticQuantizer/index.html index 1984356cd4..e3f3d676c8 100644 --- a/dev/api/tree/splitter/StaticQuantizer/index.html +++ b/dev/api/tree/splitter/StaticQuantizer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/tree/splitter/TEBSTSplitter/index.html b/dev/api/tree/splitter/TEBSTSplitter/index.html index 2926d54181..3d9380ff54 100644 --- a/dev/api/tree/splitter/TEBSTSplitter/index.html +++ b/dev/api/tree/splitter/TEBSTSplitter/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/Rolling/index.html b/dev/api/utils/Rolling/index.html index 91806cb191..1890a8cf96 100644 --- a/dev/api/utils/Rolling/index.html +++ b/dev/api/utils/Rolling/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/SortedWindow/index.html b/dev/api/utils/SortedWindow/index.html index 8b674e8ba9..d7fa888a12 100644 --- a/dev/api/utils/SortedWindow/index.html +++ b/dev/api/utils/SortedWindow/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/TimeRolling/index.html b/dev/api/utils/TimeRolling/index.html index c360602b3c..ee8ec3186e 100644 --- a/dev/api/utils/TimeRolling/index.html +++ b/dev/api/utils/TimeRolling/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/VectorDict/index.html b/dev/api/utils/VectorDict/index.html index 9e043a85ad..6eb9331a08 100644 --- a/dev/api/utils/VectorDict/index.html +++ b/dev/api/utils/VectorDict/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/dict2numpy/index.html b/dev/api/utils/dict2numpy/index.html index 36c6348cbb..1101e1e2fb 100644 --- a/dev/api/utils/dict2numpy/index.html +++ b/dev/api/utils/dict2numpy/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/expand-param-grid/index.html b/dev/api/utils/expand-param-grid/index.html index 5965af8a0f..7402296398 100644 --- a/dev/api/utils/expand-param-grid/index.html +++ b/dev/api/utils/expand-param-grid/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/log-method-calls/index.html b/dev/api/utils/log-method-calls/index.html index 187f1df79b..528cf4790b 100644 --- a/dev/api/utils/log-method-calls/index.html +++ b/dev/api/utils/log-method-calls/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/argmax/index.html b/dev/api/utils/math/argmax/index.html index a43db9e390..bc49857beb 100644 --- a/dev/api/utils/math/argmax/index.html +++ b/dev/api/utils/math/argmax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/chain-dot/index.html b/dev/api/utils/math/chain-dot/index.html index 702bf5991e..a494fb2808 100644 --- a/dev/api/utils/math/chain-dot/index.html +++ b/dev/api/utils/math/chain-dot/index.html @@ -22,7 +22,7 @@ - + diff --git 
a/dev/api/utils/math/clamp/index.html b/dev/api/utils/math/clamp/index.html index 9e0dabf1d1..d058a3b72c 100644 --- a/dev/api/utils/math/clamp/index.html +++ b/dev/api/utils/math/clamp/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/dot/index.html b/dev/api/utils/math/dot/index.html index 4bd5374bdc..c042632d1c 100644 --- a/dev/api/utils/math/dot/index.html +++ b/dev/api/utils/math/dot/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/dotvecmat/index.html b/dev/api/utils/math/dotvecmat/index.html index 3de12455a3..c70284dd79 100644 --- a/dev/api/utils/math/dotvecmat/index.html +++ b/dev/api/utils/math/dotvecmat/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/log-sum-2-exp/index.html b/dev/api/utils/math/log-sum-2-exp/index.html index 9f86e0e48a..717c97b5f7 100644 --- a/dev/api/utils/math/log-sum-2-exp/index.html +++ b/dev/api/utils/math/log-sum-2-exp/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/matmul2d/index.html b/dev/api/utils/math/matmul2d/index.html index 78d7254e31..9c5949c08a 100644 --- a/dev/api/utils/math/matmul2d/index.html +++ b/dev/api/utils/math/matmul2d/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/minkowski-distance/index.html b/dev/api/utils/math/minkowski-distance/index.html index 68fcfa53fe..a00d018afc 100644 --- a/dev/api/utils/math/minkowski-distance/index.html +++ b/dev/api/utils/math/minkowski-distance/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/norm/index.html b/dev/api/utils/math/norm/index.html index ee4330a803..45d1ffe4c5 100644 --- a/dev/api/utils/math/norm/index.html +++ b/dev/api/utils/math/norm/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/outer/index.html b/dev/api/utils/math/outer/index.html index a128444aa3..7ac7ed6617 100644 --- a/dev/api/utils/math/outer/index.html +++ b/dev/api/utils/math/outer/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/prod/index.html b/dev/api/utils/math/prod/index.html index 3d02147601..f80dcf56ec 100644 --- a/dev/api/utils/math/prod/index.html +++ b/dev/api/utils/math/prod/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/sherman-morrison/index.html b/dev/api/utils/math/sherman-morrison/index.html index bddd30194c..560b6fc681 100644 --- a/dev/api/utils/math/sherman-morrison/index.html +++ b/dev/api/utils/math/sherman-morrison/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/sigmoid/index.html b/dev/api/utils/math/sigmoid/index.html index 48839e502b..ddd566da21 100644 --- a/dev/api/utils/math/sigmoid/index.html +++ b/dev/api/utils/math/sigmoid/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/sign/index.html b/dev/api/utils/math/sign/index.html index 24f9a3b39a..bea0128b8d 100644 --- a/dev/api/utils/math/sign/index.html +++ b/dev/api/utils/math/sign/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/softmax/index.html b/dev/api/utils/math/softmax/index.html index 33c3a89604..c9c1bc9f8d 100644 --- a/dev/api/utils/math/softmax/index.html +++ b/dev/api/utils/math/softmax/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/math/woodbury-matrix/index.html b/dev/api/utils/math/woodbury-matrix/index.html index 88fcad9bd0..a1b94c5934 100644 --- a/dev/api/utils/math/woodbury-matrix/index.html +++ b/dev/api/utils/math/woodbury-matrix/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/norm/normalize-values-in-dict/index.html b/dev/api/utils/norm/normalize-values-in-dict/index.html index 73672c3199..d083491beb 100644 --- 
a/dev/api/utils/norm/normalize-values-in-dict/index.html +++ b/dev/api/utils/norm/normalize-values-in-dict/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/norm/scale-values-in-dict/index.html b/dev/api/utils/norm/scale-values-in-dict/index.html index 0c606cbb85..c4e0f74fb5 100644 --- a/dev/api/utils/norm/scale-values-in-dict/index.html +++ b/dev/api/utils/norm/scale-values-in-dict/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/numpy2dict/index.html b/dev/api/utils/numpy2dict/index.html index f0ccf55f12..47ce92c005 100644 --- a/dev/api/utils/numpy2dict/index.html +++ b/dev/api/utils/numpy2dict/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/pretty/humanize-bytes/index.html b/dev/api/utils/pretty/humanize-bytes/index.html index 6a1f0950fd..cd9d08269c 100644 --- a/dev/api/utils/pretty/humanize-bytes/index.html +++ b/dev/api/utils/pretty/humanize-bytes/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/pretty/print-table/index.html b/dev/api/utils/pretty/print-table/index.html index 8caaed0072..5dc98cb608 100644 --- a/dev/api/utils/pretty/print-table/index.html +++ b/dev/api/utils/pretty/print-table/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/random/exponential/index.html b/dev/api/utils/random/exponential/index.html index 431924c502..d4ba1ab083 100644 --- a/dev/api/utils/random/exponential/index.html +++ b/dev/api/utils/random/exponential/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/api/utils/random/poisson/index.html b/dev/api/utils/random/poisson/index.html index dcfb050433..d955f27b20 100644 --- a/dev/api/utils/random/poisson/index.html +++ b/dev/api/utils/random/poisson/index.html @@ -22,7 +22,7 @@ - + diff --git a/dev/benchmarks/Binary classification/index.html b/dev/benchmarks/Binary classification/index.html index f6faab16ba..065aad184a 100644 --- a/dev/benchmarks/Binary classification/index.html +++ b/dev/benchmarks/Binary classification/index.html @@ -22,7 +22,7 @@ - + @@ -36990,14 +36990,14 @@

Environment

+ + +
  • + + forest + +
  • @@ -1571,6 +1578,13 @@ anomaly + + +
  • + + forest + +
  • @@ -1594,6 +1608,10 @@

    anomaly

    • anomaly.LocalOutlierFactor, which is an online version of the LOF algorithm for anomaly detection that matches the scikit-learn implementation. +

    forest

    +
      +
    • Simplify the inner structures of forest.ARFClassifier and forest.ARFRegressor by removing a redundant class hierarchy. Simplify how concept drift logging can be accessed in individual trees and in the forest as a whole.
    • +
    diff --git a/dev/search/search_index.json b/dev/search/search_index.json index 2eb783ba46..ca01bf24a5 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"api/overview/","title":"Overview","text":""},{"location":"api/overview/#active","title":"active","text":"

    Online active learning.

    • EntropySampler
    "},{"location":"api/overview/#base","title":"base","text":"
    • ActiveLearningClassifier
    "},{"location":"api/overview/#anomaly","title":"anomaly","text":"

    Anomaly detection.

    Estimators in the anomaly module have a bespoke API. Each anomaly detector has a score_one method instead of a predict_one method. This method returns an anomaly score. Normal observations should have a low score, whereas anomalous observations should have a high score. The range of the scores is relative to each estimator.

    Anomaly detectors are usually unsupervised, in that they analyze the distribution of the features they are shown. But River also has a notion of supervised anomaly detectors. These analyze the distribution of a target variable, and optionally include the distribution of the features as well. They are useful for detecting labelling anomalies, which can be detrimental if they are learned by a model.
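
    As a minimal sketch of this API, the loop below scores each sample before learning from it. anomaly.HalfSpaceTrees is used with its default parameters, and the uniform features are synthetic stand-ins (HalfSpaceTrees expects features scaled to [0, 1]):

    ```python
    import random

    from river import anomaly

    rng = random.Random(42)
    detector = anomaly.HalfSpaceTrees(seed=42)  # expects features in [0, 1]

    for _ in range(500):
        x = {'x': rng.random()}
        score = detector.score_one(x)   # a high score suggests an anomaly
        detector = detector.learn_one(x)
    ```

    Scoring before learning mimics how the detector would be used on a live stream, where each observation is unseen at scoring time.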

    • GaussianScorer
    • HalfSpaceTrees
    • LocalOutlierFactor
    • OneClassSVM
    • QuantileFilter
    • ThresholdFilter
    "},{"location":"api/overview/#base_1","title":"base","text":"
    • AnomalyDetector
    • AnomalyFilter
    • SupervisedAnomalyDetector
    "},{"location":"api/overview/#bandit","title":"bandit","text":"

    Multi-armed bandit (MAB) policies.

    The bandit policies in River have a generic API. This allows them to be used in a variety of situations. For instance, they can be used for model selection (see model_selection.BanditRegressor).
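
    As an illustrative sketch, the simulated loop below pulls an arm, observes a reward, and updates the policy. The arm means and reward noise are made-up values, not part of any River dataset:

    ```python
    import random

    from river import bandit

    policy = bandit.EpsilonGreedy(epsilon=0.1, seed=42)
    true_means = [0.2, 0.5, 0.8]  # hypothetical expected reward per arm
    rng = random.Random(42)

    for _ in range(1000):
        arm = policy.pull(list(range(len(true_means))))  # pick an arm to play
        reward = rng.gauss(true_means[arm], 0.1)         # observe its reward
        policy = policy.update(arm, reward)              # let the policy learn
    ```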

    Classes

    • BayesUCB
    • EpsilonGreedy
    • Exp3
    • LinUCBDisjoint
    • RandomPolicy
    • ThompsonSampling
    • UCB

    Functions

    • evaluate
    • evaluate_offline
    "},{"location":"api/overview/#base_2","title":"base","text":"
    • ContextualPolicy
    • Policy
    "},{"location":"api/overview/#datasets","title":"datasets","text":"
    • BanditDataset
    • NewsArticles
    "},{"location":"api/overview/#envs","title":"envs","text":"
    • CandyCaneContest
    • KArmedTestbed
    "},{"location":"api/overview/#base_3","title":"base","text":"

    Base interfaces.

    Every estimator in River is a class, and as such inherits from at least one base interface. These are used to categorize, organize, and standardize the many estimators that River contains.

    This module contains mixin classes, which are all suffixed by Mixin. Their purpose is to provide additional functionality to an estimator, and thus need to be used in conjunction with a non-mixin base class.

    This module also contains utilities for type hinting and tagging estimators.
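
    For instance, a custom estimator can be written by inheriting from one of these interfaces. The toy regressor below is a hypothetical example, not part of River; it simply predicts the running mean of the target:

    ```python
    from river import base, stats

    class RunningMeanRegressor(base.Regressor):
        """A toy regressor that always predicts the running target mean."""

        def __init__(self):
            self.mean = stats.Mean()

        def learn_one(self, x, y):
            self.mean.update(y)
            return self

        def predict_one(self, x):
            return self.mean.get()
    ```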

    • Base
    • BinaryDriftAndWarningDetector
    • BinaryDriftDetector
    • Classifier
    • Clusterer
    • DriftAndWarningDetector
    • DriftDetector
    • Ensemble
    • Estimator
    • MiniBatchClassifier
    • MiniBatchRegressor
    • MiniBatchSupervisedTransformer
    • MiniBatchTransformer
    • MultiLabelClassifier
    • MultiTargetRegressor
    • Regressor
    • SupervisedTransformer
    • Transformer
    • Wrapper
    • WrapperEnsemble
    "},{"location":"api/overview/#cluster","title":"cluster","text":"

    Unsupervised clustering.

    • CluStream
    • DBSTREAM
    • DenStream
    • KMeans
    • STREAMKMeans
    • TextClust
    "},{"location":"api/overview/#compat","title":"compat","text":"

    Compatibility tools.

    This module contains adapters for making River estimators compatible with other libraries, and vice-versa whenever possible. The relevant adapters will only be usable if you have installed the necessary library. For instance, you have to install scikit-learn in order to use the compat.convert_sklearn_to_river function.
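
    As a minimal sketch, assuming scikit-learn is installed, a scikit-learn estimator can be wrapped so it exposes River's learn_one/predict_one API:

    ```python
    from river import compat
    from sklearn import linear_model

    model = compat.convert_sklearn_to_river(linear_model.SGDRegressor())

    model = model.learn_one({'x': 1.0}, 2.0)  # delegates to partial_fit
    model.predict_one({'x': 1.0})
    ```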

    Classes

    • River2SKLClassifier
    • River2SKLClusterer
    • River2SKLRegressor
    • River2SKLTransformer
    • SKL2RiverClassifier
    • SKL2RiverRegressor

    Functions

    • convert_river_to_sklearn
    • convert_sklearn_to_river
    "},{"location":"api/overview/#compose","title":"compose","text":"

    Model composition.

    This module contains utilities for merging multiple modeling steps into a single pipeline. Although pipelines are not the only way to process a stream of data, we highly encourage you to use them.
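
    As a small sketch, a pipeline can be built either explicitly or with the | operator; the feature and target values here are made up:

    ```python
    from river import compose, linear_model, preprocessing

    # these two definitions build the same pipeline
    model = compose.Pipeline(
        preprocessing.StandardScaler(),
        linear_model.LinearRegression(),
    )
    model = preprocessing.StandardScaler() | linear_model.LinearRegression()

    model = model.learn_one({'x': 1.0}, 1.5)
    model.predict_one({'x': 2.0})
    ```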

    Classes

    • Discard
    • FuncTransformer
    • Grouper
    • Pipeline
    • Prefixer
    • Renamer
    • Select
    • SelectType
    • Suffixer
    • TargetTransformRegressor
    • TransformerProduct
    • TransformerUnion

    Functions

    • learn_during_predict
    "},{"location":"api/overview/#conf","title":"conf","text":"

    Conformal predictions. This modules contains wrappers to enable conformal predictions on any regressor or classifier.

    • Interval
    • RegressionJackknife
    "},{"location":"api/overview/#covariance","title":"covariance","text":"

    Online estimation of covariance and precision matrices.

    • EmpiricalCovariance
    • EmpiricalPrecision
    "},{"location":"api/overview/#datasets_1","title":"datasets","text":"

    Datasets.

    This module contains a collection of datasets for multiple tasks: classification, regression, etc. The data correspond to popular datasets and are conveniently wrapped to easily iterate over them in a stream fashion. All datasets have a fixed size. Please refer to river.synth if you are interested in infinite synthetic data generators.
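
    For instance, iterating over a dataset yields (features, target) pairs, one sample at a time; Phishing ships with River, so the sketch below needs no download:

    ```python
    from river import datasets

    dataset = datasets.Phishing()

    for x, y in dataset:
        # x is a dict of features, y is the binary target
        print(y, list(x)[:3])
        break
    ```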

    Regression

    | Name              | Samples   | Features |
    |-------------------|-----------|----------|
    | AirlinePassengers | 144       | 1        |
    | Bikes             | 182,470   | 8        |
    | ChickWeights      | 578       | 3        |
    | MovieLens100K     | 100,000   | 10       |
    | Restaurants       | 252,108   | 7        |
    | Taxis             | 1,458,644 | 8        |
    | TrumpApproval     | 1,001     | 6        |
    | WaterFlow         | 1,268     | 1        |

    Binary classification

    | Name         | Samples    | Features  | Sparse |
    |--------------|------------|-----------|--------|
    | Bananas      | 5,300      | 2         |        |
    | CreditCard   | 284,807    | 30        |        |
    | Elec2        | 45,312     | 8         |        |
    | Higgs        | 11,000,000 | 28        |        |
    | HTTP         | 567,498    | 3         |        |
    | MaliciousURL | 2,396,130  | 3,231,961 | ✔️     |
    | Phishing     | 1,250      | 9         |        |
    | SMSSpam      | 5,574      | 1         |        |
    | SMTP         | 95,156     | 3         |        |
    | TREC07       | 75,419     | 5         |        |

    Multi-class classification

    | Name          | Samples | Features | Classes |
    |---------------|---------|----------|---------|
    | ImageSegments | 2,310   | 18       | 7       |
    | Insects       | 52,848  | 33       | 6       |
    | Keystroke     | 20,400  | 31       | 51      |

    Multi-output binary classification

    | Name  | Samples | Features | Outputs |
    |-------|---------|----------|---------|
    | Music | 593     | 72       | 6       |

    Multi-output regression

    | Name       | Samples | Features | Outputs |
    |------------|---------|----------|---------|
    | SolarFlare | 1,066   | 10       | 3       |
    "},{"location":"api/overview/#base_4","title":"base","text":"
    • Dataset
    • FileDataset
    • RemoteDataset
    • SyntheticDataset
    "},{"location":"api/overview/#synth","title":"synth","text":"

    Synthetic datasets.

    Each synthetic dataset is a stream generator. The benefit of using a generator is that it does not store the data: each sample is generated on the fly. Except for a couple of them, the majority of these generators produce infinite streams of data.
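
    For example, a generator can be sampled lazily with take; the parameter values below are arbitrary:

    ```python
    from river.datasets import synth

    gen = synth.Agrawal(classification_function=0, seed=42)

    for x, y in gen.take(3):  # draw three samples on the fly
        print(y)
    ```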

    Binary classification

    | Name               | Features |
    |--------------------|----------|
    | Agrawal            | 9        |
    | AnomalySine        | 2        |
    | ConceptDriftStream | 9        |
    | Hyperplane         | 10       |
    | Mixed              | 4        |
    | SEA                | 3        |
    | Sine               | 2        |
    | STAGGER            | 3        |

    Regression

    | Name          | Features |
    |---------------|----------|
    | Friedman      | 10       |
    | FriedmanDrift | 10       |
    | Mv            | 10       |
    | Planes2D      | 10       |

    Multi-class classification

    | Name           | Features | Classes |
    |----------------|----------|---------|
    | LED            | 7        | 10      |
    | LEDDrift       | 7        | 10      |
    | RandomRBF      | 10       | 2       |
    | RandomRBFDrift | 10       | 2       |
    | RandomTree     | 10       | 2       |
    | Waveform       | 21       | 3       |

    Multi-output binary classification

    | Name    | Features | Outputs |
    |---------|----------|---------|
    | Logical | 2        | 3       |
    "},{"location":"api/overview/#drift","title":"drift","text":"

    Concept Drift Detection.

    This module contains concept drift detection methods. The purpose of a drift detector is to raise an alarm if the data distribution changes. A good drift detector maximizes the number of true positives while keeping the number of false positives to a minimum.
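
    A minimal sketch of this idea: feed a univariate stream whose mean shifts halfway through to drift.ADWIN, and check its drift_detected flag after each update. The stream itself is synthetic:

    ```python
    import random

    from river import drift

    rng = random.Random(42)
    detector = drift.ADWIN()

    # a stream whose mean jumps from 0 to 1 halfway through
    data = [rng.gauss(0, 0.1) for _ in range(1000)]
    data += [rng.gauss(1, 0.1) for _ in range(1000)]

    for i, v in enumerate(data):
        detector.update(v)
        if detector.drift_detected:
            print(f'change detected at index {i}')
            break
    ```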

    • ADWIN
    • DriftRetrainingClassifier
    • DummyDriftDetector
    • KSWIN
    • PageHinkley
    "},{"location":"api/overview/#binary","title":"binary","text":"

    Drift detection for binary data.

    • DDM
    • EDDM
    • HDDM_A
    • HDDM_W
    "},{"location":"api/overview/#datasets_2","title":"datasets","text":"
    • AirlinePassengers
    • Apple
    • Bitcoin
    • BrentSpotPrice
    • Occupancy
    • RunLog
    • UKCoalEmploy
    "},{"location":"api/overview/#dummy","title":"dummy","text":"

    Dummy estimators.

    This module is here for testing purposes, as well as providing baseline performances.

    • NoChangeClassifier
    • PriorClassifier
    • StatisticRegressor
    "},{"location":"api/overview/#ensemble","title":"ensemble","text":"

    Ensemble learning.

    Broadly speaking, there are two kinds of ensemble approaches. There are those that copy a single model several times and aggregate the predictions of said copies. This includes bagging as well as boosting. Then there are those that are composed of an arbitrary list of models, and can therefore aggregate predictions from different kinds of models.
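
    As a small sketch of the first kind, bagging copies a single base model several times and aggregates the copies' predictions; the hyperparameter values are arbitrary:

    ```python
    from river import ensemble, linear_model, preprocessing

    model = preprocessing.StandardScaler() | ensemble.BaggingClassifier(
        model=linear_model.LogisticRegression(),
        n_models=5,  # number of copies of the base model
        seed=42,
    )
    ```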

    • ADWINBaggingClassifier
    • ADWINBoostingClassifier
    • AdaBoostClassifier
    • BOLEClassifier
    • BaggingClassifier
    • BaggingRegressor
    • EWARegressor
    • LeveragingBaggingClassifier
    • SRPClassifier
    • SRPRegressor
    • StackingClassifier
    • VotingClassifier
    "},{"location":"api/overview/#evaluate","title":"evaluate","text":"

    Model evaluation.

    This module provides utilities to evaluate an online model. The goal is to reproduce a real-world scenario with high fidelity. The core function of this module is progressive_val_score, which makes it possible to evaluate a model via progressive validation.

    This module also exposes \"tracks\". A track is a predefined combination of a dataset and one or more metrics. This provides a principled manner of comparing models with each other. For instance, the RegressionTrack contains several datasets and metrics to evaluate regression models. There is also a bare Track class to implement a custom track. The benchmarks directory at the root of the River repository uses these tracks.
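
    A minimal sketch of progressive validation, where the model predicts each sample before learning from it; the model and metric choices below are arbitrary:

    ```python
    from river import datasets, evaluate, linear_model, metrics, preprocessing

    model = preprocessing.StandardScaler() | linear_model.LogisticRegression()

    evaluate.progressive_val_score(
        dataset=datasets.Phishing(),
        model=model,
        metric=metrics.Accuracy(),
    )
    ```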

    Classes

    • BinaryClassificationTrack
    • MultiClassClassificationTrack
    • RegressionTrack
    • Track

    Functions

    • iter_progressive_val_score
    • progressive_val_score
    "},{"location":"api/overview/#facto","title":"facto","text":"

    Factorization machines.

    • FFMClassifier
    • FFMRegressor
    • FMClassifier
    • FMRegressor
    • FwFMClassifier
    • FwFMRegressor
    • HOFMClassifier
    • HOFMRegressor
    "},{"location":"api/overview/#feature_extraction","title":"feature_extraction","text":"

    Feature extraction.

    This module can be used to extract information from raw features. This includes encoding categorical data as well as looking at interactions between existing features. This differs from the preprocessing module, in that the latter's purpose is rather to clean the data so that it may be processed by a particular machine learning algorithm.
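
    For instance, BagOfWords extracts term counts from raw text without any prior fitting; the sentence below is just an example:

    ```python
    from river import feature_extraction

    bow = feature_extraction.BagOfWords(lowercase=True)
    bow.transform_one('The quick brown fox jumps over the lazy fox')
    # e.g. {'the': 2, 'quick': 1, 'brown': 1, 'fox': 2, ...}
    ```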

    • Agg
    • BagOfWords
    • PolynomialExtender
    • RBFSampler
    • TFIDF
    • TargetAgg
    "},{"location":"api/overview/#feature_selection","title":"feature_selection","text":"

    Feature selection.

    • PoissonInclusion
    • SelectKBest
    • VarianceThreshold
    "},{"location":"api/overview/#forest","title":"forest","text":"

    This module implements forest-based classifiers and regressors.

    • AMFClassifier
    • AMFRegressor
    • ARFClassifier
    • ARFRegressor
    • OXTRegressor
    "},{"location":"api/overview/#imblearn","title":"imblearn","text":"

    Sampling methods.

    • ChebyshevOverSampler
    • ChebyshevUnderSampler
    • HardSamplingClassifier
    • HardSamplingRegressor
    • RandomOverSampler
    • RandomSampler
    • RandomUnderSampler
    "},{"location":"api/overview/#linear_model","title":"linear_model","text":"

    Linear models.

    • ALMAClassifier
    • BayesianLinearRegression
    • LinearRegression
    • LogisticRegression
    • PAClassifier
    • PARegressor
    • Perceptron
    • SoftmaxRegression
    "},{"location":"api/overview/#base_5","title":"base","text":"
    • GLM
    "},{"location":"api/overview/#metrics","title":"metrics","text":"

    Evaluation metrics.

    All the metrics are updated one sample at a time. This way we can track the performance of predictive methods over time.

    Note that all metrics have a revert method, enabling them to be wrapped in utils.Rolling. This allows computing rolling metrics:

    from river import metrics, utils\n\ny_true = [True, False, True, True]\ny_pred = [False, False, True, True]\n\nmetric = utils.Rolling(metrics.Accuracy(), window_size=3)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Accuracy: 0.00%\nAccuracy: 50.00%\nAccuracy: 66.67%\nAccuracy: 100.00%\n
    • Accuracy
    • AdjustedMutualInfo
    • AdjustedRand
    • BalancedAccuracy
    • ClassificationReport
    • CohenKappa
    • Completeness
    • ConfusionMatrix
    • CrossEntropy
    • F1
    • FBeta
    • FowlkesMallows
    • GeometricMean
    • Homogeneity
    • Jaccard
    • LogLoss
    • MAE
    • MAPE
    • MCC
    • MSE
    • MacroF1
    • MacroFBeta
    • MacroJaccard
    • MacroPrecision
    • MacroRecall
    • MicroF1
    • MicroFBeta
    • MicroJaccard
    • MicroPrecision
    • MicroRecall
    • MultiFBeta
    • MutualInfo
    • NormalizedMutualInfo
    • Precision
    • R2
    • RMSE
    • RMSLE
    • ROCAUC
    • Rand
    • Recall
    • RollingROCAUC
    • SMAPE
    • Silhouette
    • VBeta
    • WeightedF1
    • WeightedFBeta
    • WeightedJaccard
    • WeightedPrecision
    • WeightedRecall
    "},{"location":"api/overview/#base_6","title":"base","text":"
    • BinaryMetric
    • ClassificationMetric
    • Metric
    • Metrics
    • MultiClassMetric
    • RegressionMetric
    • WrapperMetric
    "},{"location":"api/overview/#multioutput","title":"multioutput","text":"

    Metrics for multi-output learning.

    • ExactMatch
    • MacroAverage
    • MicroAverage
    • MultiLabelConfusionMatrix
    • PerOutput
    • SampleAverage
    "},{"location":"api/overview/#base_7","title":"base","text":"
    • MultiOutputClassificationMetric
    • MultiOutputRegressionMetric
    "},{"location":"api/overview/#misc","title":"misc","text":"

    Miscellaneous.

    This module essentially regroups some implementations that have nowhere else to go.

    • SDFT
    • Skyline
    "},{"location":"api/overview/#model_selection","title":"model_selection","text":"

    Model selection.

    This module regroups a variety of methods that may be used for performing model selection. A model selector is provided with a list of models. These are called \"experts\" in the expert learning literature. The model selector's goal is to perform at least as well as the best model. Indeed, initially, the best model is not known. The performance of each model becomes more apparent as time goes by. Different strategies are possible, each one offering a different tradeoff in terms of accuracy and computational performance.

    Model selection can be used for tuning the hyperparameters of a model. This may be done by creating a copy of the model for each set of hyperparameters, and treating each copy as a separate model. The utils.expand_param_grid function can be used for this purpose.
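
    A minimal sketch, assuming arbitrary learning rates as the hyperparameter grid: each candidate is a copy of the same linear model, and the selector is used like any other regressor:

    ```python
    from river import linear_model, model_selection, optim

    models = [
        linear_model.LinearRegression(optimizer=optim.SGD(lr))
        for lr in (1e-3, 1e-2, 1e-1)  # arbitrary learning rates
    ]

    selector = model_selection.GreedyRegressor(models)
    selector = selector.learn_one({'x': 1.0}, 2.0)
    selector.predict_one({'x': 1.0})
    ```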

    • BanditClassifier
    • BanditRegressor
    • GreedyRegressor
    • SuccessiveHalvingClassifier
    • SuccessiveHalvingRegressor
    "},{"location":"api/overview/#base_8","title":"base","text":"
    • ModelSelectionClassifier
    • ModelSelectionRegressor
    "},{"location":"api/overview/#multiclass","title":"multiclass","text":"

    Multi-class classification.

    • OneVsOneClassifier
    • OneVsRestClassifier
    • OutputCodeClassifier
    "},{"location":"api/overview/#multioutput_1","title":"multioutput","text":"

    Multi-output models.

    • ClassifierChain
    • MonteCarloClassifierChain
    • MultiClassEncoder
    • ProbabilisticClassifierChain
    • RegressorChain
    "},{"location":"api/overview/#naive_bayes","title":"naive_bayes","text":"

    Naive Bayes algorithms.

    • BernoulliNB
    • ComplementNB
    • GaussianNB
    • MultinomialNB
    "},{"location":"api/overview/#neighbors","title":"neighbors","text":"

    Neighbors-based learning.

    These methods are also known as lazy methods: generalisation of the training data is delayed until a query is received.

    • KNNClassifier
    • KNNRegressor
    • LazySearch
    • SWINN
    "},{"location":"api/overview/#neural_net","title":"neural_net","text":"

    Neural networks.

    • MLPRegressor
    "},{"location":"api/overview/#activations","title":"activations","text":"
    • Identity
    • ReLU
    • Sigmoid
    "},{"location":"api/overview/#optim","title":"optim","text":"

    Stochastic optimization.

    • AMSGrad
    • AdaBound
    • AdaDelta
    • AdaGrad
    • AdaMax
    • Adam
    • Averager
    • FTRLProximal
    • Momentum
    • Nadam
    • NesterovMomentum
    • RMSProp
    • SGD
    "},{"location":"api/overview/#base_9","title":"base","text":"
    • Initializer
    • Loss
    • Optimizer
    • Scheduler
    "},{"location":"api/overview/#initializers","title":"initializers","text":"

    Weight initializers.

    • Constant
    • Normal
    • Zeros
    "},{"location":"api/overview/#losses","title":"losses","text":"

    Loss functions.

    Each loss function is intended to work with single values as well as numpy vectors.
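
    As a quick sketch, each loss exposes eval for the loss value and gradient for its derivative with respect to the prediction; the numbers below are arbitrary:

    ```python
    from river import optim

    loss = optim.losses.Squared()

    loss.eval(y_true=1.0, y_pred=0.8)      # pointwise loss value
    loss.gradient(y_true=1.0, y_pred=0.8)  # gradient w.r.t. the prediction
    ```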

    • Absolute
    • BinaryFocalLoss
    • BinaryLoss
    • Cauchy
    • CrossEntropy
    • EpsilonInsensitiveHinge
    • Hinge
    • Huber
    • Log
    • MultiClassLoss
    • Poisson
    • Quantile
    • RegressionLoss
    • Squared
    "},{"location":"api/overview/#schedulers","title":"schedulers","text":"

    Learning rate schedulers.

    • Constant
    • InverseScaling
    • Optimal
    "},{"location":"api/overview/#preprocessing","title":"preprocessing","text":"

    Feature preprocessing.

    The purpose of this module is to modify an existing set of features so that they can be processed by a machine learning algorithm. This may be done by scaling numeric parts of the data or by one-hot encoding categorical features. The difference with the feature_extraction module is that the latter extracts new information from the data.
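
    A minimal sketch with made-up features: StandardScaler keeps running statistics and scales each incoming sample with what it has seen so far:

    ```python
    from river import preprocessing

    scaler = preprocessing.StandardScaler()

    x = {'temperature': 22.0, 'humidity': 0.61}  # hypothetical features
    scaler = scaler.learn_one(x)  # update the running mean and variance
    scaler.transform_one(x)       # scale using the statistics seen so far
    ```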

    • AdaptiveStandardScaler
    • Binarizer
    • FeatureHasher
    • GaussianRandomProjector
    • LDA
    • MaxAbsScaler
    • MinMaxScaler
    • Normalizer
    • OneHotEncoder
    • OrdinalEncoder
    • PredClipper
    • PreviousImputer
    • RobustScaler
    • SparseRandomProjector
    • StandardScaler
    • StatImputer
    • TargetMinMaxScaler
    • TargetStandardScaler
    "},{"location":"api/overview/#proba","title":"proba","text":"

    Probability distributions.

    • Beta
    • Gaussian
    • Multinomial
    • MultivariateGaussian
    "},{"location":"api/overview/#base_10","title":"base","text":"
    • BinaryDistribution
    • ContinuousDistribution
    • DiscreteDistribution
    • Distribution
    "},{"location":"api/overview/#reco","title":"reco","text":"

    Recommender systems module.

    Recommender systems (recsys for short) are a large topic. This module is far from comprehensive. It simply provides models which can contribute towards building a recommender system.

    A typical recommender system is made up of a retrieval phase, followed by a ranking phase. The output of the retrieval phase is a shortlist of the catalogue of items. The items in the shortlist are then usually ranked according to the expected preference the user will have for each item. This module focuses on the ranking phase.

    Models which inherit from the Ranker class have a rank method. This allows sorting a set of items for a given user. Each model also has a learn_one(user, item, y, context) method which allows learning user preferences. The y parameter is a reward value, the nature of which is specific to each recommendation task. Typically the reward is a number or a boolean value. It is up to the user to determine how to translate a user session into training data.
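
    As a small sketch, using the dummy RandomNormal ranker and made-up users, items, and rewards:

    ```python
    from river import reco

    model = reco.RandomNormal(seed=42)

    # y is the reward: here, a hypothetical 1-5 rating
    model = model.learn_one(user='Alice', item='movie-1', y=5)

    model.rank(user='Alice', items=['movie-1', 'movie-2', 'movie-3'])
    ```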

    • Baseline
    • BiasedMF
    • FunkMF
    • RandomNormal
    "},{"location":"api/overview/#base_11","title":"base","text":"
    • Ranker
    "},{"location":"api/overview/#rules","title":"rules","text":"

    Decision rules-based algorithms.

    • AMRules
    "},{"location":"api/overview/#sketch","title":"sketch","text":"

    Data containers and collections for sequential data.

    This module has summary and sketch structures that operate with constrained amounts of memory and processing time.
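
    For instance, Counter is a count-min sketch that approximates how many times each item has been seen, using a fixed memory budget; the text below is arbitrary:

    ```python
    from river import sketch

    counter = sketch.Counter()

    for token in 'to be or not to be'.split():
        counter = counter.update(token)

    counter['to']  # approximate count; may overestimate, never underestimates
    ```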

    • Counter
    • HeavyHitters
    • Histogram
    • Set
    "},{"location":"api/overview/#stats","title":"stats","text":"

    Running statistics.
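
    As a quick sketch, every statistic is updated one value at a time and queried with get; the values are arbitrary:

    ```python
    from river import stats

    var = stats.Var()

    for x in (2.0, 4.0, 6.0):
        var.update(x)

    var.get()  # the running (sample) variance
    ```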

    • AbsMax
    • AutoCorr
    • BayesianMean
    • Count
    • Cov
    • EWMean
    • EWVar
    • Entropy
    • IQR
    • Kurtosis
    • Link
    • MAD
    • Max
    • Mean
    • Min
    • Mode
    • NUnique
    • PeakToPeak
    • PearsonCorr
    • Quantile
    • RollingAbsMax
    • RollingIQR
    • RollingMax
    • RollingMin
    • RollingMode
    • RollingPeakToPeak
    • RollingQuantile
    • SEM
    • Shift
    • Skew
    • Sum
    • Var
    "},{"location":"api/overview/#base_12","title":"base","text":"
    • Bivariate
    • Univariate
    "},{"location":"api/overview/#stream","title":"stream","text":"

    Streaming utilities.

    The module includes tools to iterate over data streams.
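
    A small sketch with in-memory arrays: iter_array turns rows into (feature dict, target) pairs:

    ```python
    from river import stream

    X = [[0.1, 0.4], [0.2, 0.3]]
    Y = [True, False]

    for x, y in stream.iter_array(X, Y, feature_names=['a', 'b']):
        print(x, y)  # e.g. {'a': 0.1, 'b': 0.4} True
    ```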

    Classes

    • Cache
    • TwitchChatStream
    • TwitterLiveStream

    Functions

    • iter_arff
    • iter_array
    • iter_csv
    • iter_libsvm
    • iter_pandas
    • iter_sklearn_dataset
    • iter_sql
    • shuffle
    • simulate_qa
    "},{"location":"api/overview/#time_series","title":"time_series","text":"

    Time series forecasting.

    Classes

    • ForecastingMetric
    • HoltWinters
    • HorizonAggMetric
    • HorizonMetric
    • SNARIMAX

    Functions

    • evaluate
    • iter_evaluate
    "},{"location":"api/overview/#base_13","title":"base","text":"
    • Forecaster
    "},{"location":"api/overview/#tree","title":"tree","text":"

    This module implements incremental Decision Tree (iDT) algorithms for handling classification and regression tasks.

    Each family of iDT will be presented in a dedicated section.

    At any moment, iDT might face situations where an input feature previously used to make a split decision is missing in an incoming sample. In this case, the most traversed path is selected to pass down the instance. Moreover, in the case of nominal features, if a new category arises and the feature is used in a decision node, a new branch is created to accommodate the new value.

    1. Hoeffding Trees

    This family of iDT algorithms uses the Hoeffding bound to determine whether or not the incrementally computed best split candidates would be equivalent to the ones obtained in a batch-processing fashion.

    All the available Hoeffding Tree (HT) implementations share some common functionalities; a small configuration sketch follows the list below:

    • Set the maximum tree depth allowed (max_depth).

    • Handle Active and Inactive nodes: Active learning nodes update their own internal state to improve predictions and monitor input features to perform split attempts. Inactive learning nodes do not update their internal state and only keep the predictors; they are used to save memory in the tree (max_size).

    • Enable/disable memory management.

    • Define strategies to sort leaves according to how likely they are going to be split. This enables deactivating non-promising leaves to save memory.

    • Disable 'poor' attributes to save memory and speed up tree construction. A poor attribute is an input feature whose split merit is much smaller than the current best candidate's. Once a feature is disabled, the tree stops saving the statistics necessary to split on such a feature.

    • Define properties to access leaf prediction strategies, split criteria, and other relevant characteristics.
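
    Here is that configuration sketch; the parameter values are arbitrary and the single training sample is made up:

    ```python
    from river import tree

    model = tree.HoeffdingTreeClassifier(
        grace_period=100,  # samples a leaf observes between split attempts
        max_depth=5,       # cap on the tree depth
    )

    model = model.learn_one({'x': 1.0}, True)
    model.predict_one({'x': 1.0})
    ```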

    2. Stochastic Gradient Trees

    Stochastic Gradient Trees (SGT) directly optimize a loss function, rather than relying on split heuristics to guide the tree growth. F-tests are performed to decide whether a leaf should be expanded or its prediction value should be updated.

    SGTs can deal with binary classification and single-target regression. They also support dynamic and static feature quantizers to deal with numerical inputs.

    • ExtremelyFastDecisionTreeClassifier
    • HoeffdingAdaptiveTreeClassifier
    • HoeffdingAdaptiveTreeRegressor
    • HoeffdingTreeClassifier
    • HoeffdingTreeRegressor
    • SGTClassifier
    • SGTRegressor
    • iSOUPTreeRegressor
    "},{"location":"api/overview/#base_14","title":"base","text":"

    This module defines generic branch and leaf implementations. These should be used in River by each tree-based model. Using these classes makes the code more DRY. The only reason for not doing so would be performance, whereby a tree-based model uses a bespoke implementation.

    This module defines a bunch of methods to ease the manipulation and diagnostic of trees. Its intention is to provide utilities for walking over a tree and visualizing it.

    • Branch
    • Leaf
    "},{"location":"api/overview/#splitter","title":"splitter","text":"

    This module implements the Attribute Observers (AO) (or tree splitters) that are used by the Hoeffding Trees (HT). It also implements the feature quantizers (FQ) used by Stochastic Gradient Trees (SGT). AOs are a core aspect of HT construction, and might represent one of the major bottlenecks when building the trees. The same holds for SGTs and FQs. The correct choice and setup of a splitter might result in significant differences in the running time and memory usage of the incremental decision trees.

    AOs for classification and regression trees can be differentiated by using the property is_target_class (True for splitters designed for classification tasks). An error will be raised if one tries to use a classification splitter in a regression tree and vice-versa. Lastly, AOs cannot be used in SGTs and FQs cannot be used in Hoeffding Trees. So, care must be taken when choosing the correct feature splitter.
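
    As a short sketch, a splitter is passed to a tree at construction time; here a regression splitter goes with a regression tree, which is the constraint described above:

    ```python
    from river import tree

    # a regression splitter paired with a regression tree
    model = tree.HoeffdingTreeRegressor(
        splitter=tree.splitter.TEBSTSplitter(),
    )
    ```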

    • DynamicQuantizer
    • EBSTSplitter
    • ExhaustiveSplitter
    • GaussianSplitter
    • HistogramSplitter
    • QOSplitter
    • Quantizer
    • Splitter
    • StaticQuantizer
    • TEBSTSplitter
    "},{"location":"api/overview/#utils","title":"utils","text":"

    Shared utility classes and functions

    Classes

    • Rolling
    • SortedWindow
    • TimeRolling
    • VectorDict

    Functions

    • dict2numpy
    • expand_param_grid
    • log_method_calls
    • numpy2dict
    "},{"location":"api/overview/#math","title":"math","text":"

    Mathematical utility functions (intended for internal purposes).

    A lot of this is experimental and has a high probability of changing in the future.
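
    A couple of quick examples with arbitrary inputs; these helpers operate on plain floats and dicts:

    ```python
    from river import utils

    utils.math.sigmoid(0.0)  # 0.5

    # dot product of two feature dicts
    utils.math.dot({'a': 1.0, 'b': 2.0}, {'a': 3.0, 'b': 4.0})  # 11.0
    ```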

    • argmax
    • chain_dot
    • clamp
    • dot
    • dotvecmat
    • log_sum_2_exp
    • matmul2d
    • minkowski_distance
    • norm
    • outer
    • prod
    • sherman_morrison
    • sigmoid
    • sign
    • softmax
    • woodbury_matrix
    "},{"location":"api/overview/#norm","title":"norm","text":"
    • normalize_values_in_dict
    • scale_values_in_dict
    "},{"location":"api/overview/#pretty","title":"pretty","text":"

    Helper functions for making things readable by humans.

    • humanize_bytes
    • print_table
    "},{"location":"api/overview/#random","title":"random","text":"
    • exponential
    • poisson
    "},{"location":"api/active/EntropySampler/","title":"EntropySampler","text":"

    Active learning classifier based on entropy measures.

    The entropy sampler selects samples for labeling based on the entropy of the prediction. The higher the entropy, the more likely the sample will be selected for labeling. The entropy measure is normalized to [0, 1] and then raised to the power of the discount factor.

    "},{"location":"api/active/EntropySampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      The classifier to wrap.

    • discount_factor

      Type \u2192 float

      Default \u2192 3

      The discount factor to apply to the entropy measure. A value of 1 won't affect the entropy. The higher the discount factor, the more the entropy will be discounted, and the less likely samples will be selected for labeling. A value of 0 will select all samples for labeling. The discount factor is thus a way to control how many samples are selected for labeling.

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/active/EntropySampler/#examples","title":"Examples","text":"

    from river import active\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.SMSSpam()\nmetric = metrics.Accuracy()\nmodel = (\n    feature_extraction.TFIDF(on='body') |\n    linear_model.LogisticRegression()\n)\nmodel = active.EntropySampler(model, seed=42)\n\nn_samples_used = 0\nfor x, y in dataset:\n    y_pred, ask = model.predict_one(x)\n    metric = metric.update(y, y_pred)\n    if ask:\n        n_samples_used += 1\n        model = model.learn_one(x, y)\n\nmetric\n
    Accuracy: 86.60%\n

    dataset.n_samples, n_samples_used\n
    (5574, 1921)\n

    print(f\"{n_samples_used / dataset.n_samples:.2%}\")\n
    34.46%\n

    "},{"location":"api/active/EntropySampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/active/base/ActiveLearningClassifier/","title":"ActiveLearningClassifier","text":"

    Base class for active learning classifiers.

    "},{"location":"api/active/base/ActiveLearningClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type → base.Classifier

      The classifier to wrap.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/active/base/ActiveLearningClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/anomaly/GaussianScorer/","title":"GaussianScorer","text":"

    Univariate Gaussian anomaly detector.

    This is a supervised anomaly detector. It fits a Gaussian distribution to the target values. The anomaly score is then computed as so:

    \\[score = 2 \\mid CDF(y) - 0.5 \\mid\\]

    This makes it so that the anomaly score is between 0 and 1.
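
    For instance, the rule above can be reproduced by hand with river's running Gaussian distribution (a sketch, assuming proba.Gaussian's update and cdf methods):

    from river import proba

    dist = proba.Gaussian()
    for y in [1.0, 2.0, 3.0, 4.0, 5.0]:
        dist.update(y)

    # A value close to the mean gets a score near 0; a value in the tails, near 1.
    score = 2 * abs(dist.cdf(3.0) - 0.5)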

    "},{"location":"api/anomaly/GaussianScorer/#parameters","title":"Parameters","text":"
    • window_size

      Default → None

      Set this to fit the Gaussian distribution over a window of recent values.

    • grace_period

      Default → 100

      Number of samples before which a 0 is always returned. This is handy because the Gaussian distribution needs time to stabilize, and will likely produce overly high anomaly scores for the first samples.

    "},{"location":"api/anomaly/GaussianScorer/#examples","title":"Examples","text":"

    import random
    from river import anomaly

    rng = random.Random(42)
    detector = anomaly.GaussianScorer()

    for y in (rng.gauss(0, 1) for _ in range(100)):
        detector = detector.learn_one(None, y)

    detector.score_one(None, -3)

    0.999477...

    detector.score_one(None, 3)

    0.999153...

    detector.score_one(None, 0)

    0.052665...

    detector.score_one(None, 0.5)

    0.383717...

    "},{"location":"api/anomaly/GaussianScorer/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x — 'dict'
    • y — 'base.typing.Target'

    Returns

    SupervisedAnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x — 'dict'
    • y — 'base.typing.Target'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/HalfSpaceTrees/","title":"HalfSpaceTrees","text":"

    Half-Space Trees (HST).

    Half-space trees are an online variant of isolation forests. They work well when anomalies are spread out. However, they do not work well if anomalies are packed together in windows.

    By default, this implementation assumes that each feature has values that are comprised between 0 and 1. If this isn't the case, then you can manually specify the limits via the limits argument. If you do not know the limits in advance, then you can use a preprocessing.MinMaxScaler as an initial preprocessing step.

    The current implementation builds the trees the first time the learn_one method is called. Therefore, the first learn_one call might be slow, whereas subsequent calls will be very fast in comparison. In general, the computation time of both learn_one and score_one scales linearly with the number of trees, and exponentially with the height of each tree.

    Note that high scores indicate anomalies, whereas low scores indicate normal observations.

    "},{"location":"api/anomaly/HalfSpaceTrees/#parameters","title":"Parameters","text":"
    • n_trees

      Default → 10

      Number of trees to use.

    • height

      Default → 8

      Height of each tree. Note that a tree of height h is made up of h + 1 levels and therefore contains 2 ** (h + 1) - 1 nodes.

    • window_size

      Default → 250

      Number of observations to use for calculating the mass at each node in each tree.

    • limits

      Type → dict[base.typing.FeatureName, tuple[float, float]] | None

      Default → None

      Specifies the range of each feature. By default each feature is assumed to be in range [0, 1].

    • seed

      Type → int | None

      Default → None

      Random number seed.

    "},{"location":"api/anomaly/HalfSpaceTrees/#attributes","title":"Attributes","text":"
    • size_limit

      This is the threshold under which the node search stops during the scoring phase. The value .1 is a magic constant indicated in the original paper.

    "},{"location":"api/anomaly/HalfSpaceTrees/#examples","title":"Examples","text":"

    from river import anomaly

    X = [0.5, 0.45, 0.43, 0.44, 0.445, 0.45, 0.0]
    hst = anomaly.HalfSpaceTrees(
        n_trees=5,
        height=3,
        window_size=3,
        seed=42
    )

    for x in X[:3]:
        hst = hst.learn_one({'x': x})  # Warming up

    for x in X:
        features = {'x': x}
        hst = hst.learn_one(features)
        print(f'Anomaly score for x={x:.3f}: {hst.score_one(features):.3f}')

    Anomaly score for x=0.500: 0.107
    Anomaly score for x=0.450: 0.071
    Anomaly score for x=0.430: 0.107
    Anomaly score for x=0.440: 0.107
    Anomaly score for x=0.445: 0.107
    Anomaly score for x=0.450: 0.071
    Anomaly score for x=0.000: 0.853

    The feature values are all comprised between 0 and 1. This is what is assumed by the model by default. In the following example, we construct a pipeline that scales the data online and ensures that the values of each feature are comprised between 0 and 1.

    from river import compose
    from river import datasets
    from river import metrics
    from river import preprocessing

    model = compose.Pipeline(
        preprocessing.MinMaxScaler(),
        anomaly.HalfSpaceTrees(seed=42)
    )

    auc = metrics.ROCAUC()

    for x, y in datasets.CreditCard().take(2500):
        score = model.score_one(x)
        model = model.learn_one(x)
        auc = auc.update(y, score)

    auc

    ROCAUC: 91.15%

    You can also use the evaluate.progressive_val_score function to evaluate the model on a data stream.

    from river import evaluate

    model = model.clone()

    evaluate.progressive_val_score(
        dataset=datasets.CreditCard().take(2500),
        model=model,
        metric=metrics.ROCAUC(),
        print_every=1000
    )

    [1,000] ROCAUC: 88.43%
    [2,000] ROCAUC: 89.28%
    [2,500] ROCAUC: 91.15%
    ROCAUC: 91.15%

    "},{"location":"api/anomaly/HalfSpaceTrees/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x — 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x — 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    1. Tan, S.C., Ting, K.M. and Liu, T.F., 2011, June. Fast anomaly detection for streaming data. In Twenty-Second International Joint Conference on Artificial Intelligence.

    "},{"location":"api/anomaly/LocalOutlierFactor/","title":"LocalOutlierFactor","text":"

    Incremental Local Outlier Factor (Incremental LOF).

    The Incremental Local Outlier Factor (ILOF) is an online version of the Local Outlier Factor (LOF), proposed by Pokrajac et al. (2007), and is used to identify outliers based on the density of their local neighbors.

    The algorithm takes the following elements into account:

    • NewPoints: new points;
    • kNN(p): the k-nearest neighbors of p (the k closest points to p);
    • RkNN(p): the reverse k-nearest neighbors of p (points that have p as one of their neighbors);
    • set_upd_lrd: the set of points that need to have their local reachability distance updated;
    • set_upd_lof: the set of points that need to have their local outlier factor updated.

    This implementation within River, based on the original one in the paper, follows these steps:

    1. Insert new data points (NewPoints) and calculate their distances to existing points;
    2. Update the nearest neighbors and reverse nearest neighbors of all the points;
    3. Define the sets of affected points that require updates;
    4. Calculate the reachability distance from the new points to their neighbors (NewPoints -> kNN(NewPoints)) and from the reverse neighbors to the new points (RkNN(NewPoints) -> NewPoints);
    5. Update the reachability distance for affected points: RkNN(RkNN(NewPoints)) -> RkNN(NewPoints);
    6. Update the local reachability distance of affected points: lrd(set_upd_lrd);
    7. Update the local outlier factor: lof(set_upd_lof).

    The incremental LOF algorithm is expected to provide detection performance equivalent to the iterated static LOF algorithm (applied after the insertion of each data record), while requiring significantly less computational time. Moreover, the insertion of a new data point, as well as the deletion of an old one, influences only a limited number of its closest neighbors, which means that the number of updates per insertion/deletion does not depend on the total number of instances in the data set.

    "},{"location":"api/anomaly/LocalOutlierFactor/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type → int

      Default → 10

      The number of nearest neighbors to use for density estimation.

    • distance_func

      Type → DistanceFunc

      Default → None

      Distance function to be used. By default, the Euclidean distance is used.
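
      For example, assuming distance_func accepts any callable that maps two feature dicts to a float, a Manhattan distance can be swapped in (a sketch, not taken verbatim from the docs):

      import functools
      from river import anomaly
      from river.utils.math import minkowski_distance

      # Manhattan distance (p=1) instead of the default Euclidean distance (p=2).
      lof = anomaly.LocalOutlierFactor(
          n_neighbors=20,
          distance_func=functools.partial(minkowski_distance, p=1),
      )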

    "},{"location":"api/anomaly/LocalOutlierFactor/#attributes","title":"Attributes","text":"
    • x_list

      A list of stored observations.

    • x_batch

      A buffer to hold incoming observations until it's time to update the model.

    • x_scores

      A buffer to hold incoming observations until it's time to score them.

    • dist_dict

      A dictionary to hold distances between observations.

    • neighborhoods

      A dictionary to hold neighborhoods for each observation.

    • rev_neighborhoods

      A dictionary to hold reverse neighborhoods for each observation.

    • k_dist

      A dictionary to hold k-distances for each observation.

    • reach_dist

      A dictionary to hold reachability distances for each observation.

    • lof

      A dictionary to hold Local Outlier Factors for each observation.

    • local_reach

      A dictionary to hold local reachability distances for each observation.

    "},{"location":"api/anomaly/LocalOutlierFactor/#examples","title":"Examples","text":"

    import pandas as pd
    from river import anomaly
    from river import datasets

    cc_df = pd.DataFrame(datasets.CreditCard())

    lof = anomaly.LocalOutlierFactor(n_neighbors=20)

    for x, _ in datasets.CreditCard().take(200):
        lof.learn_one(x)

    lof.learn_many(cc_df[201:401])

    scores = []
    for x in cc_df[0][401:406]:
        scores.append(lof.score_one(x))

    [round(score, 3) for score in scores]

    [1.802, 1.937, 1.567, 1.181, 1.28]

    "},{"location":"api/anomaly/LocalOutlierFactor/#methods","title":"Methods","text":"learn learn_many learn_one

    Update the model.

    Parameters

    • x — 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x — 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    David Pokrajac, Aleksandar Lazarevic, and Longin Jan Latecki (2007). Incremental Local Outlier Detection for Data Streams. In: Proceedings of the 2007 IEEE Symposium on Computational Intelligence and Data Mining (CIDM 2007). 504-515. DOI: 10.1109/CIDM.2007.368917.

    "},{"location":"api/anomaly/OneClassSVM/","title":"OneClassSVM","text":"

    One-class SVM for anomaly detection.

    This is a stochastic implementation of the one-class SVM algorithm, and will not exactly match its batch formulation.

    It is encouraged to scale the data upstream with preprocessing.StandardScaler, as well as use feature_extraction.RBFSampler to capture non-linearities.
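
    A minimal sketch of that recommendation, assuming the usual pipeline composition operator:

    from river import anomaly, feature_extraction, preprocessing

    # Standardize the features, approximate an RBF kernel, then fit the one-class SVM.
    model = (
        preprocessing.StandardScaler()
        | feature_extraction.RBFSampler(seed=42)
        | anomaly.OneClassSVM(nu=0.2)
    )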

    "},{"location":"api/anomaly/OneClassSVM/#parameters","title":"Parameters","text":"
    • nu

      Default → 0.1

      An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. You can think of it as the expected fraction of anomalies.

    • optimizer

      Type → optim.base.Optimizer | None

      Default → None

      The sequential optimizer used for updating the weights.

    • intercept_lr

      Type → optim.base.Scheduler | float

      Default → 0.01

      Learning rate scheduler used for updating the intercept. An optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default → 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type → optim.base.Initializer | None

      Default → None

      Weights initialization scheme.

    "},{"location":"api/anomaly/OneClassSVM/#attributes","title":"Attributes","text":"
    • weights
    "},{"location":"api/anomaly/OneClassSVM/#examples","title":"Examples","text":"

    from river import anomaly
    from river import compose
    from river import datasets
    from river import metrics
    from river import preprocessing

    model = anomaly.QuantileFilter(
        anomaly.OneClassSVM(nu=0.2),
        q=0.995
    )

    auc = metrics.ROCAUC()

    for x, y in datasets.CreditCard().take(2500):
        score = model.score_one(x)
        is_anomaly = model.classify(score)
        model = model.learn_one(x)
        auc = auc.update(y, is_anomaly)

    auc

    ROCAUC: 74.68%

    You can also use the evaluate.progressive_val_score function to evaluate the model on a data stream.

    from river import evaluate

    model = model.clone()

    evaluate.progressive_val_score(
        dataset=datasets.CreditCard().take(2500),
        model=model,
        metric=metrics.ROCAUC(),
        print_every=1000
    )

    [1,000] ROCAUC: 74.40%
    [2,000] ROCAUC: 74.60%
    [2,500] ROCAUC: 74.68%
    ROCAUC: 74.68%

    "},{"location":"api/anomaly/OneClassSVM/#methods","title":"Methods","text":"learn_many learn_one

    Update the model.

    Parameters

    • x — 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/QuantileFilter/","title":"QuantileFilter","text":"

    Quantile anomaly filter.

    "},{"location":"api/anomaly/QuantileFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      An anomaly detector.

    • q

      Type → float

      The quantile level above which to classify an anomaly score as anomalous.

    • protect_anomaly_detector

      Default → True

      Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated. Indeed, if it learns the anomaly score, then it will slowly start to consider anomalous anomaly scores as normal. This might be desirable, for instance in the case of drift.

    "},{"location":"api/anomaly/QuantileFilter/#attributes","title":"Attributes","text":"
    • q
    "},{"location":"api/anomaly/QuantileFilter/#examples","title":"Examples","text":"

    from river import anomaly
    from river import compose
    from river import datasets
    from river import metrics
    from river import preprocessing

    model = compose.Pipeline(
        preprocessing.MinMaxScaler(),
        anomaly.QuantileFilter(
            anomaly.HalfSpaceTrees(seed=42),
            q=0.95
        )
    )

    report = metrics.ClassificationReport()

    for x, y in datasets.CreditCard().take(2000):
        score = model.score_one(x)
        is_anomaly = model['QuantileFilter'].classify(score)
        model = model.learn_one(x)
        report = report.update(y, is_anomaly)

    report

                   Precision   Recall   F1       Support

           0      99.95%   94.49%   97.14%      1998
           1       0.90%   50.00%    1.77%         2

       Macro      50.42%   72.25%   49.46%
       Micro      94.45%   94.45%   94.45%
    Weighted      99.85%   94.45%   97.05%

                     94.45% accuracy

    "},{"location":"api/anomaly/QuantileFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score — 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/ThresholdFilter/","title":"ThresholdFilter","text":"

    Threshold anomaly filter.

    "},{"location":"api/anomaly/ThresholdFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      An anomaly detector.

    • threshold

      Type → float

      A threshold above which to classify an anomaly score as anomalous.

    • protect_anomaly_detector

      Default → True

      Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated. Indeed, if it learns the anomaly score, then it will slowly start to consider anomalous anomaly scores as normal. This might be desirable, for instance in the case of drift.

    "},{"location":"api/anomaly/ThresholdFilter/#examples","title":"Examples","text":"

    Anomaly filters can be used as part of a pipeline. For instance, we might want to filter out anomalous observations so as not to corrupt a supervised model. As an example, let's take the datasets.WaterFlow dataset. Some of the samples have anomalous target variables because of human interventions. We don't want our model to learn these values.

    from river import datasets
    from river import metrics
    from river import time_series

    dataset = datasets.WaterFlow()
    metric = metrics.SMAPE()

    period = 24  # 24 samples per day

    model = (
        anomaly.ThresholdFilter(
            anomaly.GaussianScorer(
                window_size=period * 7,  # 7 days
                grace_period=30
            ),
            threshold=0.995
        ) |
        time_series.HoltWinters(
            alpha=0.3,
            beta=0.1,
            multiplicative=False
        )
    )

    time_series.evaluate(
        dataset,
        model,
        metric,
        horizon=period
    )

    +1  SMAPE: 4.220171
    +2  SMAPE: 4.322648
    +3  SMAPE: 4.418546
    +4  SMAPE: 4.504986
    +5  SMAPE: 4.57924
    +6  SMAPE: 4.64123
    +7  SMAPE: 4.694042
    +8  SMAPE: 4.740753
    +9  SMAPE: 4.777291
    +10 SMAPE: 4.804558
    +11 SMAPE: 4.828114
    +12 SMAPE: 4.849823
    +13 SMAPE: 4.865871
    +14 SMAPE: 4.871972
    +15 SMAPE: 4.866274
    +16 SMAPE: 4.842614
    +17 SMAPE: 4.806214
    +18 SMAPE: 4.763355
    +19 SMAPE: 4.713455
    +20 SMAPE: 4.672062
    +21 SMAPE: 4.659102
    +22 SMAPE: 4.693496
    +23 SMAPE: 4.773707
    +24 SMAPE: 4.880654

    "},{"location":"api/anomaly/ThresholdFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score — 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/AnomalyDetector/","title":"AnomalyDetector","text":"

    An anomaly detector.

    "},{"location":"api/anomaly/base/AnomalyDetector/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x — 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x — 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/AnomalyFilter/","title":"AnomalyFilter","text":"

    Anomaly filter base class.

    An anomaly filter has the ability to classify an anomaly score as anomalous or not. It can then be used to filter anomalies, in particular as part of a pipeline.

    "},{"location":"api/anomaly/base/AnomalyFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      Type → AnomalyDetector

      An anomaly detector wrapped by the anomaly filter.

    • protect_anomaly_detector

      Default → True

      Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated. Indeed, if it learns the anomaly score, then it will slowly start to consider anomalous anomaly scores as normal. This might be desirable, for instance in the case of drift.

    "},{"location":"api/anomaly/base/AnomalyFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score — 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/SupervisedAnomalyDetector/","title":"SupervisedAnomalyDetector","text":"

    A supervised anomaly detector.

    "},{"location":"api/anomaly/base/SupervisedAnomalyDetector/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x — 'dict'
    • y — 'base.typing.Target'

    Returns

    SupervisedAnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x — 'dict'
    • y — 'base.typing.Target'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/bandit/BayesUCB/","title":"BayesUCB","text":"

    Bayes-UCB bandit policy.

    Bayes-UCB is a Bayesian algorithm for the multi-armed bandit problem. It uses the posterior distribution of the reward of each arm to compute an upper confidence bound (UCB) on the expected reward of each arm. The arm with the highest UCB is then pulled. The posterior distribution is updated after each pull. The algorithm is described in [^1].
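
    As a rough sketch of the index computation with binary rewards and a Beta posterior (an assumption; the 1 - 1/t quantile schedule follows Kaufmann et al., 2012, and may differ from the library's exact code):

    from scipy import stats

    def bayes_ucb_index(successes, failures, t):
        # Upper confidence bound: a high quantile of the Beta posterior,
        # here the 1 - 1/t quantile at step t, with a Beta(1, 1) prior.
        return stats.beta.ppf(1 - 1 / t, 1 + successes, 1 + failures)

    bayes_ucb_index(successes=10, failures=5, t=100)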

    "},{"location":"api/bandit/BayesUCB/#parameters","title":"Parameters","text":"
    • reward_obj

      Default → None

      The reward object that is used to update the posterior distribution.

    • burn_in

      Default → 0

      Number of initial observations per arm before using the posterior distribution.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/BayesUCB/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/BayesUCB/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import proba
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.BayesUCB(seed=123)

    metric = stats.Sum()
    while True:
        action = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(action)
        policy = policy.update(action, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 841.

    "},{"location":"api/bandit/BayesUCB/#methods","title":"Methods","text":"compute_index

    Compute the p-th quantile of the Beta distribution for the arm.

    Parameters

    • arm_id

    pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/EpsilonGreedy/","title":"EpsilonGreedy","text":"

    \\(\\varepsilon\\)-greedy bandit policy.

    Performs arm selection by using an \(\varepsilon\)-greedy bandit strategy. An arm is selected at each step. The best arm is selected with probability \(1 - \varepsilon\).

    Selection bias is a common problem when using bandits. This bias can be mitigated by using a burn-in phase. Each model is given the chance to learn during the first burn_in steps.
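
    The selection rule itself is simple; here is a hypothetical sketch (not the library's exact code):

    import random

    def epsilon_greedy_pull(arm_means, epsilon, rng):
        if rng.random() < epsilon:
            return rng.choice(list(arm_means))    # explore: uniform random arm
        return max(arm_means, key=arm_means.get)  # exploit: best average reward so far

    rng = random.Random(42)
    epsilon_greedy_pull({'A': 0.1, 'B': 0.7, 'C': 0.4}, epsilon=0.1, rng=rng)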

    "},{"location":"api/bandit/EpsilonGreedy/#parameters","title":"Parameters","text":"
    • epsilon

      Type → float

      The probability of exploring.

    • decay

      Default → 0.0

      The decay rate of epsilon.

    • reward_obj

      Default → None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/EpsilonGreedy/#attributes","title":"Attributes","text":"
    • current_epsilon

      The value of epsilon after factoring in the decay rate.

    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/EpsilonGreedy/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.EpsilonGreedy(epsilon=0.9, seed=101)

    metric = stats.Sum()
    while True:
        arm = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(arm)
        policy = policy.update(arm, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 775.

    "},{"location":"api/bandit/EpsilonGreedy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. ε-Greedy Algorithm - The Multi-Armed Bandit Problem and Its Solutions - Lilian Weng

    "},{"location":"api/bandit/Exp3/","title":"Exp3","text":"

    Exp3 bandit policy.

    This policy works by maintaining a weight for each arm. These weights are used to randomly decide which arm to pull. The weights are increased or decreased, depending on the reward. An egalitarianism factor \\(\\gamma \\in [0, 1]\\) is included, to tune the desire to pick an arm uniformly at random. That is, if \\(\\gamma = 1\\), the arms are picked uniformly at random.
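
    Concretely, the weights translate into pull probabilities as follows (a sketch of the formula from Auer et al., 2002, referenced below; not the library's exact code):

    def exp3_distribution(weights, gamma):
        total = sum(weights.values())
        k = len(weights)
        # (1 - gamma) exploits the weights, gamma explores uniformly at random.
        return {arm: (1 - gamma) * w / total + gamma / k for arm, w in weights.items()}

    exp3_distribution({'A': 1.0, 'B': 2.0, 'C': 1.0}, gamma=0.5)
    # With gamma=1 this collapses to the uniform distribution, as noted above.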

    "},{"location":"api/bandit/Exp3/#parameters","title":"Parameters","text":"
    • gamma

      Type → float

      The egalitarianism factor. Setting this to 0 leads to what is called the EXP3 policy.

    • reward_obj

      Default → None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Default → None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/Exp3/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/Exp3/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import proba
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.Exp3(gamma=0.5, seed=42)

    metric = stats.Sum()
    while True:
        action = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(action)
        policy = policy.update(action, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 799.

    "},{"location":"api/bandit/Exp3/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. Auer, P., Cesa-Bianchi, N., Freund, Y. and Schapire, R.E., 2002. The nonstochastic multiarmed bandit problem. SIAM journal on computing, 32(1), pp.48-77.

    2. Adversarial Bandits and the Exp3 Algorithm — Jeremy Kun

    "},{"location":"api/bandit/LinUCBDisjoint/","title":"LinUCBDisjoint","text":"

    LinUCB, disjoint variant.

    Although it works, it is as of yet too slow to be used realistically in practice.

    The way this works is that each arm is assigned a linear_model.BayesianLinearRegression instance. This instance is updated every time the arm is pulled. The context is used as features for the regression. The reward is used as the target. The posterior distribution is used to compute the upper confidence bound. The arm with the highest upper confidence bound is pulled.
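
    A sketch of that bookkeeping (assuming BayesianLinearRegression's predict_one supports with_dist=True and using a simple one-standard-deviation bound; the actual confidence bound may differ):

    from river import linear_model

    arms = ['A', 'B', 'C']
    models = {arm: linear_model.BayesianLinearRegression() for arm in arms}

    context = {'hour': 9, 'is_weekend': 0}
    ucbs = {}
    for arm, model in models.items():
        dist = model.predict_one(context, with_dist=True)  # Gaussian predictive distribution
        ucbs[arm] = dist.mu + dist.sigma                   # mean plus one standard deviation

    best_arm = max(ucbs, key=ucbs.get)
    models[best_arm].learn_one(context, 1.0)  # only the pulled arm sees the reward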

    "},{"location":"api/bandit/LinUCBDisjoint/#parameters","title":"Parameters","text":"
    • alpha

      Type → float

      Default → 1.0

      Parameter used in each Bayesian linear regression.

    • beta

      Type → float

      Default → 1.0

      Parameter used in each Bayesian linear regression.

    • smoothing

      Type → float | None

      Default → None

      Parameter used in each Bayesian linear regression.

    • reward_obj

      Default → None

      The reward object used to measure the performance of each arm.

    • burn_in

      Default → 0

      The number of time steps during which each arm is pulled once.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/LinUCBDisjoint/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/LinUCBDisjoint/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'
    • context — 'dict' — defaults to None

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • context
    • reward_args
    • reward_kwargs

    1. A Contextual-Bandit Approach to Personalized News Article Recommendation

    2. Contextual Bandits Analysis of LinUCB Disjoint Algorithm with Dataset

    "},{"location":"api/bandit/RandomPolicy/","title":"RandomPolicy","text":"

    Random bandit policy.

    This policy simply pulls a random arm at each time step. It is useful as a baseline.

    "},{"location":"api/bandit/RandomPolicy/#parameters","title":"Parameters","text":"
    • reward_obj

      Default → None

      The reward object that is used to update the posterior distribution.

    • burn_in

      Default → 0

      Number of initial observations per arm before using the posterior distribution.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/RandomPolicy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/RandomPolicy/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import proba
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.RandomPolicy(seed=123)

    metric = stats.Sum()
    while True:
        action = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(action)
        policy = policy.update(action, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 755.

    "},{"location":"api/bandit/RandomPolicy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/ThompsonSampling/","title":"ThompsonSampling","text":"

    Thompson sampling.

    Thompson sampling is often used with a Beta distribution. However, any probability distribution can be used, as long it makes sense with the reward shape. For instance, a Beta distribution is meant to be used with binary rewards, while a Gaussian distribution is meant to be used with continuous rewards.
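
    The gist of the policy, as a hypothetical sketch with Beta posteriors for binary rewards (not the library's exact code):

    import random

    def thompson_pull(posteriors, rng):
        # Sample one value per arm from its posterior, then pull the best sample.
        samples = {arm: rng.betavariate(a, b) for arm, (a, b) in posteriors.items()}
        return max(samples, key=samples.get)

    rng = random.Random(42)
    # Beta(1 + successes, 1 + failures) posteriors for two arms.
    thompson_pull({'A': (3, 6), 'B': (11, 4)}, rng)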

    The randomness of a distribution is controlled by its seed. The seed should not be set within the distribution, but should rather be defined in the policy parametrization. In other words, you should do this:

    policy = ThompsonSampling(reward_obj=proba.Beta(1, 1), seed=42)

    and not this:

    policy = ThompsonSampling(reward_obj=proba.Beta(1, 1, seed=42))
    "},{"location":"api/bandit/ThompsonSampling/#parameters","title":"Parameters","text":"
    • reward_obj

      Type → proba.base.Distribution

      Default → None

      A distribution to sample from.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/ThompsonSampling/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/ThompsonSampling/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import proba
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.ThompsonSampling(reward_obj=proba.Beta(), seed=101)

    metric = stats.Sum()
    while True:
        arm = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(arm)
        policy = policy.update(arm, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 820.

    "},{"location":"api/bandit/ThompsonSampling/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. An Empirical Evaluation of Thompson Sampling

    "},{"location":"api/bandit/UCB/","title":"UCB","text":"

    Upper Confidence Bound (UCB) bandit policy.

    Due to the nature of this algorithm, it's recommended to scale the target so that it exhibits sub-gaussian properties. This can be done by passing a preprocessing.TargetStandardScaler instance to the reward_scaler argument.
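
    For reference, with \(\delta = 1\) the index reduces to the classic UCB1 rule; this is a plausible reading of the parameter description below rather than a guaranteed transcription of the implementation:

    \[\bar{x}_i + \delta \sqrt{\frac{2 \ln t}{n_i}}\]

    where \(\bar{x}_i\) is arm \(i\)'s average reward, \(n_i\) its number of pulls, and \(t\) the total number of steps so far.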

    "},{"location":"api/bandit/UCB/#parameters","title":"Parameters","text":"
    • delta

      Type → float

      The confidence level. Setting this to 1 leads to what is called the UCB1 policy.

    • reward_obj

      Default → None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Default → None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type → int

      Default → None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/UCB/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/UCB/#examples","title":"Examples","text":"

    import gym
    from river import bandit
    from river import preprocessing
    from river import stats

    env = gym.make(
        'river_bandits/CandyCaneContest-v0'
    )
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    policy = bandit.UCB(
        delta=100,
        reward_scaler=preprocessing.TargetStandardScaler(None),
        seed=42
    )

    metric = stats.Sum()
    while True:
        arm = policy.pull(range(env.action_space.n))
        observation, reward, terminated, truncated, info = env.step(arm)
        policy = policy.update(arm, reward)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 744.

    "},{"location":"api/bandit/UCB/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. Lai, T. L., & Robbins, H. (1985). Asymptotically efficient adaptive allocation rules. Advances in applied mathematics, 6(1), 4-22.

    2. Upper Confidence Bounds - The Multi-Armed Bandit Problem and Its Solutions - Lilian Weng

    3. The Upper Confidence Bound Algorithm - Bandit Algorithms

    "},{"location":"api/bandit/evaluate-offline/","title":"evaluate_offline","text":"

    Evaluate a policy on historical logs using replay.

    This is a high-level utility function for evaluating a policy using the replay methodology. This methodology is an off-policy evaluation method. It does not require an environment, and is instead data-driven.

    At each step, an arm is pulled from the provided policy. If the arm is the same as the arm that was pulled in the historical data, the reward is used to update the policy. If the arm is different, the reward is ignored. This is the off-policy aspect of the evaluation.
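
    A sketch of that replay loop (a hypothetical helper; the library's evaluate_offline handles reward statistics and contexts more carefully):

    def replay(policy, history):
        total_reward, n_used = 0.0, 0
        for arms, context, logged_arm, reward in history:
            arm = policy.pull(arms)      # what our policy would have played
            if arm == logged_arm:        # keep only the matching steps
                policy = policy.update(arm, reward)
                total_reward += reward
                n_used += 1
            # non-matching steps are discarded: their reward is ignored
        return total_reward, n_used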

    "},{"location":"api/bandit/evaluate-offline/#parameters","title":"Parameters","text":"
    • policy

      Type → bandit.base.Policy

      The policy to evaluate.

    • history

      Type → History | bandit.datasets.BanditDataset

      The history of the bandit problem. This is a generator that yields tuples of the form (arms, context, arm, reward).

    • reward_stat

      Type → stats.base.Univariate

      Default → None

      The reward statistic to use. Defaults to stats.Sum.

    "},{"location":"api/bandit/evaluate-offline/#examples","title":"Examples","text":"

    import random
    from river import bandit

    rng = random.Random(42)
    arms = ['A', 'B', 'C']
    clicks = [
        (
            arms,
            # no context
            None,
            # random arm
            rng.choice(arms),
            # reward
            rng.random() > 0.5
        )
        for _ in range(1000)
    ]

    total_reward, n_samples_used = bandit.evaluate_offline(
        policy=bandit.EpsilonGreedy(0.1, seed=42),
        history=clicks,
    )

    total_reward

    Sum: 172.

    n_samples_used

    321

    This also works out of the box with datasets that inherit from river.bandit.BanditDataset.

    news = bandit.datasets.NewsArticles()
    total_reward, n_samples_used = bandit.evaluate_offline(
        policy=bandit.RandomPolicy(seed=42),
        history=news,
    )

    total_reward, n_samples_used

    (Sum: 105., 1027)

    As expected, the random policy matches the logged arm in roughly 10% of cases. Indeed, there are 10 arms and 10,000 samples, so the expected number of matches is 1,000.

    1. Offline Evaluation of Multi-Armed Bandit Algorithms in Python using Replay

    2. Unbiased Offline Evaluation of Contextual-bandit-based News Article Recommendation Algorithms

    3. Understanding Inverse Propensity Score for Contextual Bandits

    "},{"location":"api/bandit/evaluate/","title":"evaluate","text":"

    Benchmark a list of policies on a given Gym environment.

    This is a high-level utility function for benchmarking a list of policies on a given Gym environment. For example, it can be used to populate a pandas.DataFrame with the contents of each step of each episode.

    "},{"location":"api/bandit/evaluate/#parameters","title":"Parameters","text":"
    • policies

      Type → list[bandit.base.Policy]

      A list of policies to evaluate. The policy will be reset before each episode.

    • env

      Type → gym.Env

      The Gym environment to use. One copy will be made for each policy at the beginning of each episode.

    • reward_stat

      Type → stats.base.Univariate | None

      Default → None

      A univariate statistic to keep track of the rewards. This statistic will be reset before each episode. Note that this is not the same as the reward object used by the policies. It's just a statistic to keep track of each policy's performance. If None, stats.Sum is used.

    • n_episodes

      Type → int

      Default → 20

      The number of episodes to run.

    • seed

      Type → int | None

      Default → None

      Random number generator seed for reproducibility. A random number generator will be used to seed the environment differently before each episode.

    "},{"location":"api/bandit/evaluate/#examples","title":"Examples","text":"

    import gym
    from river import bandit

    trace = bandit.evaluate(
        policies=[
            bandit.UCB(delta=1, seed=42),
            bandit.EpsilonGreedy(epsilon=0.1, seed=42),
        ],
        env=gym.make(
            'river_bandits/CandyCaneContest-v0',
            max_episode_steps=100
        ),
        n_episodes=5,
        seed=42
    )

    for step in trace:
        print(step)
        break

    {'episode': 0, 'step': 0, 'policy_idx': 0, 'arm': 81, 'reward': 0.0, 'reward_stat': 0.0}

    The return type of this function is a generator. Each step of the generator is a dictionary. You can pass the generator to a pandas.DataFrame to get a nice representation of the results.

    import pandas as pd

    trace = bandit.evaluate(
        policies=[
            bandit.UCB(delta=1, seed=42),
            bandit.EpsilonGreedy(epsilon=0.1, seed=42),
        ],
        env=gym.make(
            'river_bandits/CandyCaneContest-v0',
            max_episode_steps=100
        ),
        n_episodes=5,
        seed=42
    )

    trace_df = pd.DataFrame(trace)
    trace_df.sample(5, random_state=42)

         episode  step  policy_idx  arm  reward  reward_stat
    521        2    60           1   25     0.0         36.0
    737        3    68           1   40     1.0         20.0
    740        3    70           0   58     0.0         36.0
    660        3    30           0   31     1.0         16.0
    411        2     5           1   35     1.0          5.0

    The length of the dataframe is the number of policies times the number of episodes times the maximum number of steps per episode.

    len(trace_df)

    1000

    (
        trace_df.policy_idx.nunique() *
        trace_df.episode.nunique() *
        trace_df.step.nunique()
    )

    1000

    "},{"location":"api/bandit/base/ContextualPolicy/","title":"ContextualPolicy","text":"

    Contextual bandit policy base class.

    "},{"location":"api/bandit/base/ContextualPolicy/#parameters","title":"Parameters","text":"
    • reward_obj

      Type → RewardObj | None

      Default → None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Type → compose.TargetTransformRegressor | None

      Default → None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    "},{"location":"api/bandit/base/ContextualPolicy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/base/ContextualPolicy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'
    • context — 'dict' — defaults to None

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • context
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/base/Policy/","title":"Policy","text":"

    Bandit policy base class.

    "},{"location":"api/bandit/base/Policy/#parameters","title":"Parameters","text":"
    • reward_obj

      Type → RewardObj | None

      Default → None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Type → compose.TargetTransformRegressor | None

      Default → None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default → 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    "},{"location":"api/bandit/base/Policy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/base/Policy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids — 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/datasets/BanditDataset/","title":"BanditDataset","text":"

    Base class for bandit datasets.

    "},{"location":"api/bandit/datasets/BanditDataset/#parameters","title":"Parameters","text":"
    • n_features

      Number of features in the dataset.

    • n_samples

      Default → None

      Number of samples in the dataset.

    • n_classes

      Default → None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default → None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default → False

      Whether the dataset is sparse or not.

    "},{"location":"api/bandit/datasets/BanditDataset/#attributes","title":"Attributes","text":"
    • arms

      The list of arms that can be pulled.

    • desc

      Return the description from the docstring.

    "},{"location":"api/bandit/datasets/BanditDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k — 'int'

    "},{"location":"api/bandit/datasets/NewsArticles/","title":"NewsArticles","text":"

    News articles bandit dataset.

    This is a personalization dataset. It contains 10000 observations. There are 10 arms, and the reward is binary. There are 100 features, which turns this into a contextual bandit problem.

    "},{"location":"api/bandit/datasets/NewsArticles/#attributes","title":"Attributes","text":"
    • arms

      The list of arms that can be pulled.

    • desc

      Return the description from the docstring.

    • is_downloaded

    Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/bandit/datasets/NewsArticles/#examples","title":"Examples","text":"

    from river import bandit

    dataset = bandit.datasets.NewsArticles()
    context, arm, reward = next(iter(dataset))

    len(context)

    100

    arm, reward

    (2, False)

    "},{"location":"api/bandit/datasets/NewsArticles/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k — 'int'

    1. Machine Learning for Personalization homework

    2. Contextual Bandits Analysis of LinUCB Disjoint Algorithm with Dataset

    "},{"location":"api/bandit/envs/CandyCaneContest/","title":"CandyCaneContest","text":"

    Candy cane contest Kaggle competition.

    "},{"location":"api/bandit/envs/CandyCaneContest/#parameters","title":"Parameters","text":"
    • n_machines

      Default → 100

      Number of vending machines.

    • reward_decay

      Default → 0.03

      The multiplicative rate at which the expected reward of each vending machine decays.

    "},{"location":"api/bandit/envs/CandyCaneContest/#attributes","title":"Attributes","text":"
    • np_random

    Returns the environment's internal :attr:_np_random, initialising it with a random seed if it is not already set.

    • render_mode

    • spec

    • unwrapped

    Returns the base non-wrapped gym.Env instance.

    "},{"location":"api/bandit/envs/CandyCaneContest/#examples","title":"Examples","text":"

    import gym
    from river import stats

    env = gym.make('river_bandits/CandyCaneContest-v0')
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    metric = stats.Sum()
    while True:
        arm = env.action_space.sample()
        observation, reward, terminated, truncated, info = env.step(arm)
        metric = metric.update(reward)
        if terminated or truncated:
            break

    metric

    Sum: 734.

    "},{"location":"api/bandit/envs/CandyCaneContest/#methods","title":"Methods","text":"close

    Override close in your subclass to perform any necessary cleanup.

    Environments will automatically :meth:close() themselves when garbage collected or when the program exits.

    render

    Compute the render frames as specified by render_mode attribute during initialization of the environment.

    The set of supported modes varies per environment. (And some third-party environments may not support rendering at all.) By convention, if render_mode is: - None (default): no render is computed. - human: render returns None. The environment is continuously rendered in the current display or terminal. Usually for human consumption. - rgb_array: return a single frame representing the current state of the environment. A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image. - rgb_array_list: return a list of frames representing the states of the environment since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3), as with rgb_array. - ansi: Return a string (str) or StringIO.StringIO containing a terminal-style text representation for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors). Note: Make sure that your class's metadata 'render_modes' key includes the list of supported modes. It's recommended to call super() in implementations to use the functionality of this method.

    reset

    Resets the environment to an initial state and returns the initial observation.

    This method can reset the environment's random number generator(s) if seed is an integer or if the environment has not yet initialized a random number generator. If the environment already has a random number generator and :meth:reset is called with seed=None, the RNG should not be reset. Moreover, :meth:reset should (in the typical use case) be called with an integer seed right after initialization and then never again. Args: seed (optional int): The seed that is used to initialize the environment's PRNG. If the environment does not already have a PRNG and seed=None (the default option) is passed, a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and seed=None is passed, the PRNG will not be reset. If you pass an integer, the PRNG will be reset even if it already exists. Usually, you want to pass an integer right after the environment has been initialized and then never again. Please refer to the minimal example above to see this paradigm in action. options (optional dict): Additional information to specify how the environment is reset (optional, depending on the specific environment) Returns: observation (object): Observation of the initial state. This will be an element of :attr:observation_space (typically a numpy array) and is analogous to the observation returned by :meth:step. info (dictionary): This dictionary contains auxiliary information complementing observation. It should be analogous to the info returned by :meth:step.

    Parameters

    • seed \u2014 Optional[int] \u2014 defaults to None
    • options \u2014 Optional[dict] \u2014 defaults to None

    step

    Run one timestep of the environment's dynamics.

    When end of episode is reached, you are responsible for calling :meth:reset to reset this environment's state. Accepts an action and returns a tuple (observation, reward, terminated, truncated, info). Args: action (ActType): an action provided by the agent Returns: observation (object): this will be an element of the environment's :attr:observation_space. This may, for instance, be a numpy array containing the positions and velocities of certain objects. reward (float): The amount of reward returned as a result of taking the action. terminated (bool): whether a terminal state (as defined under the MDP of the task) is reached. In this case further step() calls could return undefined results. truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. Typically a timelimit, but could also be used to indicate agent physically going out of bounds. Can be used to end the episode prematurely before a terminal state is reached. info (dictionary): info contains auxiliary diagnostic information (helpful for debugging, learning, and logging). This might, for instance, contain: metrics that describe the agent's performance state, variables that are hidden from observations, or individual reward terms that are combined to produce the total reward. It also can contain information that distinguishes truncation and termination, however this is deprecated in favour of returning two booleans, and will be removed in a future version. (deprecated) done (bool): A boolean value indicating whether the episode has ended, in which case further :meth:step calls will return undefined results. A done signal may be emitted for different reasons: Maybe the task underlying the environment was solved successfully, a certain timelimit was exceeded, or the physics simulation has entered an invalid state.

    Parameters

    • machine_index

    1. Santa 2020 - The Candy Cane Contest \u21a9

    "},{"location":"api/bandit/envs/KArmedTestbed/","title":"KArmedTestbed","text":"

    k-armed testbed.

    This is a simple environment that can be used to test bandit algorithms. It is based on the 10-armed testbed described in the book "Reinforcement Learning: An Introduction" by Sutton and Barto.

    "},{"location":"api/bandit/envs/KArmedTestbed/#parameters","title":"Parameters","text":"
    • k

      Type \u2192 int

      Default \u2192 10

      Number of arms.

    "},{"location":"api/bandit/envs/KArmedTestbed/#attributes","title":"Attributes","text":"
    • np_random

      Returns the environment's internal :attr:_np_random, initialising it with a random seed if it is not already set.

    • render_mode

    • spec

    • unwrapped

      Returns the base non-wrapped gym.Env instance.

    "},{"location":"api/bandit/envs/KArmedTestbed/#methods","title":"Methods","text":"close

    Override close in your subclass to perform any necessary cleanup.

    Environments will automatically :meth:close() themselves when garbage collected or when the program exits.

    render

    Compute the render frames as specified by render_mode attribute during initialization of the environment.

    The set of supported modes varies per environment. (And some third-party environments may not support rendering at all.) By convention, if render_mode is: - None (default): no render is computed. - human: render returns None. The environment is continuously rendered in the current display or terminal. Usually for human consumption. - rgb_array: return a single frame representing the current state of the environment. A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image. - rgb_array_list: return a list of frames representing the states of the environment since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3), as with rgb_array. - ansi: Return a string (str) or StringIO.StringIO containing a terminal-style text representation for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors). Note: Make sure that your class's metadata 'render_modes' key includes the list of supported modes. It's recommended to call super() in implementations to use the functionality of this method.

    reset

    Resets the environment to an initial state and returns the initial observation.

    This method can reset the environment's random number generator(s) if seed is an integer or if the environment has not yet initialized a random number generator. If the environment already has a random number generator and :meth:reset is called with seed=None, the RNG should not be reset. Moreover, :meth:reset should (in the typical use case) be called with an integer seed right after initialization and then never again. Args: seed (optional int): The seed that is used to initialize the environment's PRNG. If the environment does not already have a PRNG and seed=None (the default option) is passed, a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and seed=None is passed, the PRNG will not be reset. If you pass an integer, the PRNG will be reset even if it already exists. Usually, you want to pass an integer right after the environment has been initialized and then never again. Please refer to the minimal example above to see this paradigm in action. options (optional dict): Additional information to specify how the environment is reset (optional, depending on the specific environment) Returns: observation (object): Observation of the initial state. This will be an element of :attr:observation_space (typically a numpy array) and is analogous to the observation returned by :meth:step. info (dictionary): This dictionary contains auxiliary information complementing observation. It should be analogous to the info returned by :meth:step.

    Parameters

    • seed \u2014 Optional[int] \u2014 defaults to None
    • options \u2014 Optional[dict] \u2014 defaults to None

    step

    Run one timestep of the environment's dynamics.

    When end of episode is reached, you are responsible for calling :meth:reset to reset this environment's state. Accepts an action and returns a tuple (observation, reward, terminated, truncated, info). Args: action (ActType): an action provided by the agent Returns: observation (object): this will be an element of the environment's :attr:observation_space. This may, for instance, be a numpy array containing the positions and velocities of certain objects. reward (float): The amount of reward returned as a result of taking the action. terminated (bool): whether a terminal state (as defined under the MDP of the task) is reached. In this case further step() calls could return undefined results. truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. Typically a timelimit, but could also be used to indicate agent physically going out of bounds. Can be used to end the episode prematurely before a terminal state is reached. info (dictionary): info contains auxiliary diagnostic information (helpful for debugging, learning, and logging). This might, for instance, contain: metrics that describe the agent's performance state, variables that are hidden from observations, or individual reward terms that are combined to produce the total reward. It also can contain information that distinguishes truncation and termination, however this is deprecated in favour of returning two booleans, and will be removed in a future version. (deprecated) done (bool): A boolean value indicating whether the episode has ended, in which case further :meth:step calls will return undefined results. A done signal may be emitted for different reasons: Maybe the task underlying the environment was solved successfully, a certain timelimit was exceeded, or the physics simulation has entered an invalid state.

    Parameters

    • arm

    "},{"location":"api/base/Base/","title":"Base","text":"

    Base class that is inherited by the majority of classes in River.

    This base class allows us to handle the following tasks in a uniform manner:

    • Getting and setting parameters

    • Displaying information

    • Mutating/cloning

    "},{"location":"api/base/Base/#methods","title":"Methods","text":"clone

    Return a fresh estimator with the same parameters.

    The clone has the same parameters but has not been updated with any data. This works by looking at the parameters from the class signature. Each parameter is either recursively cloned if it is a class, or deep-copied via copy.deepcopy if not. If the calling object is stochastic (i.e. it accepts a seed parameter) and has not been seeded, then the clone will not be idempotent. Indeed, this method's purpose is simply to return a new instance with the same input parameters.

    Parameters

    • new_params \u2014 'dict | None' \u2014 defaults to None
    • include_attributes \u2014 defaults to False
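
    As a minimal sketch, clone returns an unfitted copy, and new_params can override constructor parameters; linear_model.LinearRegression is used here purely for illustration:

    from river import linear_model\n\nmodel = linear_model.LinearRegression(l2=0.1)\nclone = model.clone({'l2': 0.01})\nclone.l2\n
    0.01\n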

    mutate

    Modify attributes.

    This changes parameters inplace. Although you can change attributes yourself, this is the recommended way to proceed. By default, all attributes are immutable, meaning they shouldn't be mutated. Calling mutate on an immutable attribute raises a ValueError. Mutable attributes are specified via the _mutable_attributes property, and are thus specified on a per-estimator basis.

    Parameters

    • new_attrs \u2014 'dict'
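
    A hypothetical estimator sketching the contract: threshold is declared mutable via _mutable_attributes, so mutate accepts it, whereas mutating any other attribute would raise a ValueError.

    from river import base\n\nclass MyEstimator(base.Base):\n    _mutable_attributes = {'threshold'}\n\n    def __init__(self, threshold=0.5):\n        self.threshold = threshold\n\nest = MyEstimator()\nest.mutate({'threshold': 0.9})\nest.threshold\n
    0.9\n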

    "},{"location":"api/base/BinaryDriftAndWarningDetector/","title":"BinaryDriftAndWarningDetector","text":"

    A binary drift detector that is also capable of issuing warnings.

    "},{"location":"api/base/BinaryDriftAndWarningDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/base/BinaryDriftAndWarningDetector/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self
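
    As a rough sketch, drift.binary.DDM implements this interface; on a stream of prediction errors, the warning flag is typically raised before the drift flag:

    import random\nfrom river import drift\n\nrng = random.Random(42)\nddm = drift.binary.DDM()\n# mostly-correct predictions followed by mostly-wrong ones\nstream = [rng.random() < 0.1 for _ in range(1000)] + [rng.random() < 0.6 for _ in range(1000)]\nfor i, x in enumerate(stream):\n    ddm = ddm.update(x)\n    if ddm.warning_detected:\n        pass  # entering the warning zone\n    if ddm.drift_detected:\n        break  # drift confirmed at index i\n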

    "},{"location":"api/base/BinaryDriftDetector/","title":"BinaryDriftDetector","text":"

    A drift detector for binary data.

    "},{"location":"api/base/BinaryDriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/base/BinaryDriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    "},{"location":"api/base/Classifier/","title":"Classifier","text":"

    A classifier.

    "},{"location":"api/base/Classifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.
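
    A minimal sketch of the interface, with linear_model.LogisticRegression standing in as an illustrative implementation:

    from river import linear_model\n\nmodel = linear_model.LogisticRegression()\nmodel = model.learn_one({'x': 1.0}, True)\nproba = model.predict_proba_one({'x': 1.0})  # {False: ..., True: ...}\nmodel.predict_one({'x': 1.0})\n
    True\n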

    "},{"location":"api/base/Clusterer/","title":"Clusterer","text":"

    A clustering model.

    "},{"location":"api/base/Clusterer/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    "},{"location":"api/base/DriftAndWarningDetector/","title":"DriftAndWarningDetector","text":"

    A drift detector that is also capable of issuing warnings.

    "},{"location":"api/base/DriftAndWarningDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/base/DriftAndWarningDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self

    "},{"location":"api/base/DriftDetector/","title":"DriftDetector","text":"

    A drift detector.

    "},{"location":"api/base/DriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/base/DriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self
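
    A minimal sketch using drift.ADWIN on a univariate stream whose mean shifts halfway through; the exact detection index depends on the data and parameters:

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nadwin = drift.ADWIN()\nstream = [rng.gauss(0, 1) for _ in range(500)] + [rng.gauss(4, 1) for _ in range(500)]\nfor i, x in enumerate(stream):\n    adwin = adwin.update(x)\n    if adwin.drift_detected:\n        print(f'Change detected at index {i}')\n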

    "},{"location":"api/base/Ensemble/","title":"Ensemble","text":"

    An ensemble is a model which is composed of a list of models.

    "},{"location":"api/base/Ensemble/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[Estimator]

    "},{"location":"api/base/Ensemble/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/base/Ensemble/#methods","title":"Methods","text":"append

    S.append(value) -- append value to the end of the sequence

    Parameters

    • item

    clear

    S.clear() -> None -- remove all items from S

    copy count

    S.count(value) -> integer -- return number of occurrences of value

    Parameters

    • item

    extend

    S.extend(iterable) -- extend sequence by appending elements from the iterable

    Parameters

    • other

    index

    S.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present.

    Supporting start and stop arguments is optional, but recommended.

    Parameters

    • item
    • args

    insert

    S.insert(index, value) -- insert value before index

    Parameters

    • i
    • item

    pop

    S.pop([index]) -> item -- remove and return item at index (default last). Raise IndexError if list is empty or index is out of range.

    Parameters

    • i \u2014 defaults to -1

    remove

    S.remove(value) -- remove first occurrence of value. Raise ValueError if the value is not present.

    Parameters

    • item

    reverse

    S.reverse() -- reverse IN PLACE

    sort"},{"location":"api/base/Estimator/","title":"Estimator","text":"

    An estimator.

    "},{"location":"api/base/Estimator/#methods","title":"Methods","text":""},{"location":"api/base/MiniBatchClassifier/","title":"MiniBatchClassifier","text":"

    A classifier that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchClassifier/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.
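
    A minimal sketch with linear_model.LogisticRegression, which implements this mini-batch interface; columns map to feature names and the target is a boolean series:

    import pandas as pd\nfrom river import linear_model\n\nX = pd.DataFrame({'x1': [0.1, 0.4, 0.8], 'x2': [1.0, 0.2, 0.5]})\ny = pd.Series([True, False, True])\n\nmodel = linear_model.LogisticRegression()\nmodel = model.learn_many(X, y)\npreds = model.predict_many(X)  # a pd.Series of booleans aligned with X's index\n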

    "},{"location":"api/base/MiniBatchRegressor/","title":"MiniBatchRegressor","text":"

    A regressor that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchRegressor/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and real-valued targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchRegressor: self

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted outcomes.

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
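
    Likewise, a minimal sketch with linear_model.LinearRegression, which supports this interface:

    import pandas as pd\nfrom river import linear_model\n\nX = pd.DataFrame({'x': [1.0, 2.0, 3.0]})\ny = pd.Series([2.0, 4.0, 6.0])\n\nmodel = linear_model.LinearRegression()\nmodel = model.learn_many(X, y)\npreds = model.predict_many(X)  # a pd.Series of float predictions\n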

    "},{"location":"api/base/MiniBatchSupervisedTransformer/","title":"MiniBatchSupervisedTransformer","text":"

    A supervised transformer that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchSupervisedTransformer/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchSupervisedTransformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/base/MiniBatchTransformer/","title":"MiniBatchTransformer","text":"

    A transform that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchTransformer/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    Many transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
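
    A minimal sketch with preprocessing.StandardScaler, which supports both the single-sample and mini-batch interfaces:

    import pandas as pd\nfrom river import preprocessing\n\nX = pd.DataFrame({'x': [1.0, 2.0, 3.0]})\n\nscaler = preprocessing.StandardScaler()\nscaler = scaler.learn_many(X)\nscaler.transform_many(X)  # a pd.DataFrame of standardised values\n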

    "},{"location":"api/base/MultiLabelClassifier/","title":"MultiLabelClassifier","text":"

    Multi-label classifier.

    "},{"location":"api/base/MultiLabelClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, bool]'

    Returns

    MultiLabelClassifier: self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing given dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, dict[bool, float]]: A dictionary that associates a probability with each label.

    "},{"location":"api/base/MultiTargetRegressor/","title":"MultiTargetRegressor","text":"

    Multi-target regressor.

    "},{"location":"api/base/MultiTargetRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, RegTarget]'
    • kwargs

    Returns

    MultiTargetRegressor: self

    predict_one

    Predict the outputs of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[FeatureName, RegTarget]: The predictions.

    "},{"location":"api/base/Regressor/","title":"Regressor","text":"

    A regressor.

    "},{"location":"api/base/Regressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
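
    A minimal sketch of the interface, with linear_model.LinearRegression standing in as an illustrative implementation:

    from river import linear_model\n\nmodel = linear_model.LinearRegression()\nmodel = model.learn_one({'x': 1.0}, 2.0)\nmodel.predict_one({'x': 1.0})  # a float prediction\n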

    "},{"location":"api/base/SupervisedTransformer/","title":"SupervisedTransformer","text":"

    A supervised transformer.

    "},{"location":"api/base/SupervisedTransformer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/base/Transformer/","title":"Transformer","text":"

    A transformer.

    "},{"location":"api/base/Transformer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
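
    A minimal sketch with preprocessing.StandardScaler, a stateful transformer that does update itself during learn_one:

    from river import preprocessing\n\nscaler = preprocessing.StandardScaler()\nfor x in ({'x': 1.0}, {'x': 2.0}, {'x': 3.0}):\n    scaler = scaler.learn_one(x)\nscaler.transform_one({'x': 2.0})\n
    {'x': 0.0}\n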

    "},{"location":"api/base/Wrapper/","title":"Wrapper","text":"

    A wrapper model.

    "},{"location":"api/base/WrapperEnsemble/","title":"WrapperEnsemble","text":"

    A wrapper ensemble is an ensemble composed of multiple copies of the same model.

    "},{"location":"api/base/WrapperEnsemble/#parameters","title":"Parameters","text":"
    • model

      The model to copy.

    • n_models

      The number of copies to make.

    • seed

      Random number generator seed for reproducibility.

    "},{"location":"api/base/WrapperEnsemble/#attributes","title":"Attributes","text":"
    • models
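
    As a rough sketch, ensemble.BaggingClassifier follows this pattern, wrapping n_models copies of one base model:

    from river import ensemble, linear_model\n\nmodel = ensemble.BaggingClassifier(\n    model=linear_model.LogisticRegression(),\n    n_models=3,\n    seed=42\n)\nlen(model)\n
    3\n
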
    "},{"location":"api/base/WrapperEnsemble/#methods","title":"Methods","text":""},{"location":"api/cluster/CluStream/","title":"CluStream","text":"

    CluStream

    The CluStream algorithm 1 maintains statistical information about the data using micro-clusters. These micro-clusters are temporal extensions of cluster feature vectors. The micro-clusters are stored at snapshots in time following a pyramidal pattern. This pattern makes it possible to recall summary statistics from different time horizons.

    Training with a new point p is performed in two main tasks:

    • Determine the closest micro-cluster to p.

    • Check whether p fits (memory-wise) into the closest micro-cluster:

      • if p fits, put it into that micro-cluster;

      • if p does not fit, free some space to insert a new micro-cluster.

      This is done in one of two ways: deleting an old micro-cluster, or merging the two micro-clusters closest to each other.

    This implementation is an improved version of the original algorithm. Instead of maintaining the traditional cluster feature vector (the number of observations, plus the linear sum and sum of squares of the data points and time stamps), this implementation uses Welford's algorithm 2 to compute the variance incrementally, via stats.Var available within River.

    Since River does not support an actual \"off-line\" phase of the clustering algorithm (as data points are assumed to arrive continuously, one at a time), a time_gap parameter is introduced. After each time_gap, an incremental K-Means clustering algorithm is initialized and applied to the currently available micro-clusters to form the final solution, i.e. the macro-clusters.

    "},{"location":"api/cluster/CluStream/#parameters","title":"Parameters","text":"
    • n_macro_clusters

      Type \u2192 int

      Default \u2192 5

      The number of clusters (k) for the k-means algorithm.

    • max_micro_clusters

      Type \u2192 int

      Default \u2192 100

      The maximum number of micro-clusters to use.

    • micro_cluster_r_factor

      Type \u2192 int

      Default \u2192 2

      Multiplier for the micro-cluster radius. When deciding whether to add a new data point to a micro-cluster, the maximum boundary is defined as micro_cluster_r_factor times the RMS deviation of the data points in the micro-cluster from the centroid.

    • time_window

      Type \u2192 int

      Default \u2192 1000

      If the current time is T and the time window is h, we only consider the data that arrived within the period (T-h,T).

    • time_gap

      Type \u2192 int

      Default \u2192 100

      An incremental k-means is applied on the current set of micro-clusters after each time_gap to form the final macro-cluster solution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for generating initial centroid positions.

    • kwargs

      Other parameters passed to the incremental kmeans at cluster.KMeans.

    "},{"location":"api/cluster/CluStream/#attributes","title":"Attributes","text":"
    • centers (dict)

      Central positions of each cluster.

    "},{"location":"api/cluster/CluStream/#examples","title":"Examples","text":"

    In the following example, max_micro_clusters is set relatively low due to the limited number of training points. Moreover, all points are learnt before any predictions are made. The halflife is set at 0.4 to show that you can pass cluster.KMeans parameters via keyword arguments.

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [-4, 2],\n    [-4, 4],\n    [-4, 0],\n    [5, 0],\n    [5, 2],\n    [5, 4]\n]\n\nclustream = cluster.CluStream(\n    n_macro_clusters=3,\n    max_micro_clusters=5,\n    time_gap=3,\n    seed=0,\n    halflife=0.4\n)\n\nfor x, _ in stream.iter_array(X):\n    clustream = clustream.learn_one(x)\n\nclustream.predict_one({0: 1, 1: 1})\n
    1\n

    clustream.predict_one({0: -4, 1: 3})\n
    2\n

    clustream.predict_one({0: 4, 1: 3.5})\n
    0\n

    "},{"location":"api/cluster/CluStream/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • w \u2014 defaults to 1.0

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    1. Aggarwal, C.C., Philip, S.Y., Han, J. and Wang, J., 2003, A framework for clustering evolving data streams. In Proceedings 2003 VLDB conference (pp. 81-92). Morgan Kaufmann.\u00a0\u21a9

    2. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1982. Updating formulae and a pairwise algorithm for computing sample variances. In COMPSTAT 1982 5th Symposium held at Toulouse 1982 (pp. 30-41). Physica, Heidelberg. https://doi.org/10.1007/978-3-642-51461-6_3.\u00a0\u21a9

    "},{"location":"api/cluster/DBSTREAM/","title":"DBSTREAM","text":"

    DBSTREAM

    DBSTREAM 1 is a clustering algorithm for evolving data streams. It is the first micro-cluster-based online clustering component that explicitly captures the density between micro-clusters via a shared density graph. The density information in the graph is then exploited for reclustering based on the actual density between adjacent micro-clusters.

    The algorithm is divided into two parts:

    Online micro-cluster maintenance (learning)

    For a new point p:

    • Find all micro-clusters for which p falls within the fixed radius (the clustering threshold).

    • If no neighbor is found, a new micro-cluster with a weight of 1 is created for p. If one or more neighbors of p are found, the micro-clusters are updated by applying the appropriate fading and increasing their weight, and they are then moved closer to p using the Gaussian neighborhood function.

    • Next, the shared density graph is updated. To prevent micro-clusters from collapsing into each other, their movement is restricted when they come closer than \(r\) (the clustering threshold) to each other. At the end of this process, the time stamp is increased by 1.

    • Finally, a cleanup is executed every t_gap time steps, removing weak micro-clusters and weak entries in the shared density graph in order to recover memory and improve the clustering algorithm's processing speed.

    Offline generation of macro clusters (clustering)

    Macro-clusters are generated offline through the two following steps:

    • The connectivity graph C is constructed using shared density entries between strong micro-clusters. The edges in this connectivity graph with a connectivity value greater than the intersection threshold (\(\alpha\)) are used to find connected components representing the final clusters.

    • After the connectivity graph is generated, a variant of the DBSCAN algorithm proposed by Ester et al. is applied to form all macro clusters from \\(\\alpha\\)-connected micro clusters.

    "},{"location":"api/cluster/DBSTREAM/#parameters","title":"Parameters","text":"
    • clustering_threshold

      Type \u2192 float

      Default \u2192 1.0

      DBStream represents each micro cluster by a leader (a data point defining the micro cluster's center) and the density in an area of a user-specified radius \\(r\\) (clustering_threshold) around the center.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.01

      Parameter that controls the importance of historical data to current cluster. Note that fading_factor has to be different from 0.

    • cleanup_interval

      Type \u2192 float

      Default \u2192 2

      The time interval between two consecutive time points when the cleanup process is conducted.

    • intersection_factor

      Type \u2192 float

      Default \u2192 0.3

      The intersection factor, relating the area of the overlap of the micro-clusters to the area covered by the micro-clusters. This parameter is used to determine whether a micro-cluster or a shared density entry is weak.

    • minimum_weight

      Type \u2192 float

      Default \u2192 1.0

      The minimum weight for a cluster not to be considered \"noisy\".

    "},{"location":"api/cluster/DBSTREAM/#attributes","title":"Attributes","text":"
    • n_clusters

      Number of clusters generated by the algorithm.

    • clusters

      A set of final clusters of type DBStreamMicroCluster. These are either micro-clusters, or macro-clusters generated by merging all \(\alpha\)-connected micro-clusters. This set is generated through the offline phase of the algorithm.

    • centers

      Final clusters' centers.

    • micro_clusters

      Micro-clusters generated by the algorithm. Instead of directly assigning new instance points to the nearest micro-cluster, at each iteration the weights and centers are modified so that the clusters move closer to the new points, using the Gaussian neighborhood function.

    "},{"location":"api/cluster/DBSTREAM/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 0.5], [1, 0.625], [1, 0.75], [1, 1.125], [1, 1.5], [1, 1.75],\n    [4, 1.5], [4, 2.25], [4, 2.5], [4, 3], [4, 3.25], [4, 3.5]\n]\n\ndbstream = cluster.DBSTREAM(\n    clustering_threshold=1.5,\n    fading_factor=0.05,\n    cleanup_interval=4,\n    intersection_factor=0.5,\n    minimum_weight=1\n)\n\nfor x, _ in stream.iter_array(X):\n    dbstream = dbstream.learn_one(x)\n\ndbstream.predict_one({0: 1, 1: 2})\n
    0\n

    dbstream.predict_one({0: 5, 1: 2})\n
    1\n

    dbstream._n_clusters\n
    2\n

    "},{"location":"api/cluster/DBSTREAM/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. Michael Hahsler and Matthew Bolanos (2016). Clustering Data Streams Based on Shared Density between Micro-Clusters. IEEE Transactions on Knowledge and Data Engineering 28(6), pp 1449-1461.\u00a0\u21a9

    2. Ester et al (1996). A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In KDD-96 Proceedings, AAAI.\u00a0\u21a9

    "},{"location":"api/cluster/DenStream/","title":"DenStream","text":"

    DenStream

    DenStream 1 is a clustering algorithm for evolving data streams. DenStream can discover clusters with arbitrary shape and is robust against noise (outliers).

    \"Dense\" micro-clusters (named core-micro-clusters) summarise the clusters of arbitrary shape. A pruning strategy based on the concepts of potential and outlier micro-clusters guarantees the precision of the weights of the micro-clusters with limited memory.

    The algorithm is divided into two parts:

    Online micro-cluster maintenance (learning)

    For a new point p:

    • Try to merge p into either the nearest p-micro-cluster (potential) or the nearest o-micro-cluster (outlier); if neither is possible, create a new o-micro-cluster and insert it into the outlier buffer.

    • Every T_p iterations, the weights of all potential and outlier micro-clusters are considered. If a weight is smaller than a certain threshold (different for each type of micro-cluster), the micro-cluster is deleted.

    Offline generation of clusters on-demand (clustering)

    A variant of the DBSCAN algorithm 2 is used, such that all density-connected p-micro-clusters determine the final clusters. Moreover, in order for the algorithm to always be able to generate clusters, a certain number of points must be passed through the algorithm with a suitable streaming speed (number of points passed through within a unit time), indicated by n_samples_init and stream_speed.

    "},{"location":"api/cluster/DenStream/#parameters","title":"Parameters","text":"
    • decaying_factor

      Type \u2192 float

      Default \u2192 0.25

      Parameter that controls the importance of historical data to current cluster. Note that decaying_factor has to be different from 0.

    • beta

      Type \u2192 float

      Default \u2192 0.75

      Parameter to determine the threshold of outliers relative to core micro-clusters. The value of beta must be within the range (0,1].

    • mu

      Type \u2192 float

      Default \u2192 2

      Parameter to determine the threshold of outliers relative to core micro-clusters. As beta * mu must be greater than 1, mu must be within the range (1/beta, inf).

    • epsilon

      Type \u2192 float

      Default \u2192 0.02

      Defines the epsilon neighborhood.

    • n_samples_init

      Type \u2192 int

      Default \u2192 1000

      Number of points used to initialize the online process.

    • stream_speed

      Type \u2192 int

      Default \u2192 100

      Number of points that arrive per unit time.

    "},{"location":"api/cluster/DenStream/#attributes","title":"Attributes","text":"
    • n_clusters

      Number of clusters generated by the algorithm.

    • clusters

      A set of final clusters of type MicroCluster, meaning that these clusters include all the required information: number of points, creation time, weight, (weighted) linear sum, (weighted) square sum, center and radius.

    • p_micro_clusters

      The potential core-micro-clusters generated by the algorithm. When a cluster generation request arrives, these p-micro-clusters go through a variant of the DBSCAN algorithm to determine the final clusters.

    • o_micro_clusters

      The outlier micro-clusters.

    "},{"location":"api/cluster/DenStream/#examples","title":"Examples","text":"

    The following example uses the default parameters of the algorithm to test its functionality. The set of evolving points X are designed so that clusters are easily identifiable.

    from river import cluster\nfrom river import stream\n\nX = [\n    [-1, -0.5], [-1, -0.625], [-1, -0.75], [-1, -1], [-1, -1.125],\n    [-1, -1.25], [-1.5, -0.5], [-1.5, -0.625], [-1.5, -0.75], [-1.5, -1],\n    [-1.5, -1.125], [-1.5, -1.25], [1, 1.5], [1, 1.75], [1, 2],\n    [4, 1.25], [4, 1.5], [4, 2.25], [4, 2.5], [4, 3],\n    [4, 3.25], [4, 3.5], [4, 3.75], [4, 4],\n]\n\ndenstream = cluster.DenStream(decaying_factor=0.01,\n                              beta=0.5,\n                              mu=2.5,\n                              epsilon=0.5,\n                              n_samples_init=10)\n\nfor x, _ in stream.iter_array(X):\n    denstream = denstream.learn_one(x)\n\ndenstream.predict_one({0: -1, 1: -2})\n
    0\n

    denstream.predict_one({0: 5, 1: 4})\n
    1\n

    denstream.predict_one({0: 1, 1: 1})\n
    0\n

    denstream.n_clusters\n
    2\n

    "},{"location":"api/cluster/DenStream/#methods","title":"Methods","text":"BufferItem learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. Feng et al (2006, pp 328-339). Density-Based Clustering over an Evolving Data Stream with Noise. In Proceedings of the Sixth SIAM International Conference on Data Mining, April 20\u201322, 2006, Bethesda, MD, USA.\u00a0\u21a9

    2. Ester et al (1996). A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In KDD-96 Proceedings, AAAI.\u00a0\u21a9

    "},{"location":"api/cluster/KMeans/","title":"KMeans","text":"

    Incremental k-means.

    The most common way to implement batch k-means is to use Lloyd's algorithm, which consists in assigning all the data points to a set of cluster centers and then moving the centers accordingly. This requires multiple passes over the data and thus isn't applicable in a streaming setting.

    In this implementation we start by finding the cluster that is closest to the current observation. We then move the cluster's central position towards the new observation. The halflife parameter determines by how much to move the cluster toward the new observation. You will get better results if you scale your data appropriately.
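
    As a rough sketch of the update rule (not the library's exact code), each coordinate of the closest center moves a fraction halflife of the way toward the incoming observation:

    def move_center(center: dict, x: dict, halflife: float) -> dict:\n    # nudge every coordinate of the closest center toward the new point\n    return {i: c + halflife * (x[i] - c) for i, c in center.items()}\n\nmove_center({0: 0.0, 1: 0.0}, {0: 1.0, 1: 2.0}, halflife=0.5)\n
    {0: 0.5, 1: 1.0}\n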

    "},{"location":"api/cluster/KMeans/#parameters","title":"Parameters","text":"
    • n_clusters

      Default \u2192 5

      Maximum number of clusters to assign.

    • halflife

      Default \u2192 0.5

      Amount by which to move the cluster centers; a reasonable value is between 0 and 1.

    • mu

      Default \u2192 0

      Mean of the normal distribution used to instantiate cluster positions.

    • sigma

      Default \u2192 1

      Standard deviation of the normal distribution used to instantiate cluster positions.

    • p

      Default \u2192 2

      Power parameter for the Minkowski metric. When p=1, this corresponds to the Manhattan distance, while p=2 corresponds to the Euclidean distance.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for generating initial centroid positions.

    "},{"location":"api/cluster/KMeans/#attributes","title":"Attributes","text":"
    • centers (dict)

      Central positions of each cluster.

    "},{"location":"api/cluster/KMeans/#examples","title":"Examples","text":"

    In the following example, the cluster assignments are exactly the same as when using sklearn's batch implementation. However, changing the halflife parameter will produce different outputs.

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [-4, 2],\n    [-4, 4],\n    [-4, 0]\n]\n\nk_means = cluster.KMeans(n_clusters=2, halflife=0.1, sigma=3, seed=42)\n\nfor i, (x, _) in enumerate(stream.iter_array(X)):\n    k_means = k_means.learn_one(x)\n    print(f'{X[i]} is assigned to cluster {k_means.predict_one(x)}')\n
    [1, 2] is assigned to cluster 1\n[1, 4] is assigned to cluster 1\n[1, 0] is assigned to cluster 0\n[-4, 2] is assigned to cluster 1\n[-4, 4] is assigned to cluster 1\n[-4, 0] is assigned to cluster 0\n

    k_means.predict_one({0: 0, 1: 0})\n
    0\n

    k_means.predict_one({0: 4, 1: 4})\n
    1\n

    "},{"location":"api/cluster/KMeans/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    Clusterer: self

    learn_predict_one

    Equivalent to k_means.learn_one(x).predict_one(x), but faster.

    Parameters

    • x

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    1. Sequential k-Means Clustering \u21a9

    2. Sculley, D., 2010, April. Web-scale k-means clustering. In Proceedings of the 19th international conference on World Wide Web (pp. 1177-1178).\u00a0\u21a9

    "},{"location":"api/cluster/STREAMKMeans/","title":"STREAMKMeans","text":"

    STREAMKMeans

    STREAMKMeans is an alternative version of the original STREAMLSEARCH algorithm proposed by O'Callaghan et al. 1, obtained by replacing the LSEARCH-based k-medians with the k-means algorithm.

    However, instead of using traditional k-means, which requires a complete reclustering each time the temporary chunk of data points is full, this implementation uses an incremental k-means.

    At first, the cluster centers are initialized with a KMeans instance. For a new point p:

    • If the chunk is smaller than the maximum size allowed, add the new point to the temporary chunk.

    • When the chunk reaches the maximum size allowed:

      • A new incremental KMeans instance is created. It processes all the points in the temporary chunk, and its centers become the new centers.

      • All points are deleted from the temporary chunk so that new points can be added.
    • When a prediction request arrives, the centers of the algorithm will be exactly the same as the centers of the original KMeans at the time of retrieval.

    "},{"location":"api/cluster/STREAMKMeans/#parameters","title":"Parameters","text":"
    • chunk_size

      Default \u2192 10

      Maximum size allowed for the temporary data chunk.

    • n_clusters

      Default \u2192 2

      Number of clusters generated by the algorithm.

    • kwargs

      Other parameters passed to the incremental kmeans at cluster.KMeans.

    "},{"location":"api/cluster/STREAMKMeans/#attributes","title":"Attributes","text":"
    • centers

      Cluster centers generated by running the incremental KMeans algorithm over the centers of each chunk.

    "},{"location":"api/cluster/STREAMKMeans/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 0.5], [1, 0.625], [1, 0.75], [1, 1.125], [1, 1.5], [1, 1.75],\n    [4, 1.5], [4, 2.25], [4, 2.5], [4, 3], [4, 3.25], [4, 3.5]\n]\n\nstreamkmeans = cluster.STREAMKMeans(chunk_size=3, n_clusters=2, halflife=0.5, sigma=1.5, seed=0)\n\nfor x, _ in stream.iter_array(X):\n    streamkmeans = streamkmeans.learn_one(x)\n\nstreamkmeans.predict_one({0: 1, 1: 0})\n
    0\n

    streamkmeans.predict_one({0: 5, 1: 2})\n
    1\n

    "},{"location":"api/cluster/STREAMKMeans/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. O'Callaghan et al. (2002). Streaming-data algorithms for high-quality clustering. In Proceedings 18th International Conference on Data Engineering, Feb 26 - March 1, San Jose, CA, USA. DOI: 10.1109/ICDE.2002.994785.\u00a0\u21a9

    "},{"location":"api/cluster/TextClust/","title":"TextClust","text":"

    textClust, a clustering algorithm for text data.

    textClust 1 2 is a stream clustering algorithm for textual data that can identify and track topics over time in a stream of texts. The algorithm uses a widely popular two-phase clustering approach, where the stream is first summarised in real time.

    The result is many small preliminary clusters in the stream called micro-clusters. Micro-clusters maintain enough information to update and efficiently calculate the cosine similarity between them over time, based on the TF-IDF vector of their texts. Upon request, the micro-clusters can be reclustered to generate the final result using any distance-based clustering algorithm, such as hierarchical clustering. To keep the micro-clusters up-to-date, the algorithm applies a fading strategy where micro-clusters that are not updated regularly lose relevance and are eventually removed.

    "},{"location":"api/cluster/TextClust/#parameters","title":"Parameters","text":"
    • radius

      Default \u2192 0.3

      Distance threshold to merge two micro-clusters. Must be within the range (0, 1]

    • fading_factor

      Default \u2192 0.0005

      Fading factor of micro-clusters

    • tgap

      Default \u2192 100

      Time between outlier removal

    • term_fading

      Default \u2192 True

      Determines whether individual terms should also be faded

    • real_time_fading

      Default \u2192 True

      Parameter that specifies whether natural time or the number of observations should be used for fading

    • micro_distance

      Default \u2192 tfidf_cosine_distance

      Distance metric used for clustering micro-clusters

    • macro_distance

      Default \u2192 tfidf_cosine_distance

      Distance metric used for clustering macro-clusters

    • num_macro

      Default \u2192 3

      Number of macro clusters that should be identified during the reclustering phase

    • min_weight

      Default \u2192 0

      Minimum weight of micro clusters to be used for reclustering

    • auto_r

      Default \u2192 False

      Parameter that specifies whether the radius should be automatically updated

    • auto_merge

      Default \u2192 True

      Determines whether close observations should be merged together

    • sigma

      Default \u2192 1

      Parameter that influences the automated threshold adaptation technique

    "},{"location":"api/cluster/TextClust/#attributes","title":"Attributes","text":"
    • micro_clusters

      Micro-clusters generated by the algorithm. Micro-clusters are of type textclust.microcluster

    "},{"location":"api/cluster/TextClust/#examples","title":"Examples","text":"

    from river import compose\nfrom river import feature_extraction\nfrom river import metrics\nfrom river import cluster\n\ncorpus = [\n   {\"text\":'This is the first document.',\"idd\":1, \"cluster\": 1},\n   {\"text\":'This document is the second document.',\"idd\":2,\"cluster\": 1},\n   {\"text\":'And this is super unrelated.',\"idd\":3,\"cluster\": 2},\n   {\"text\":'Is this the first document?',\"idd\":4,\"cluster\": 1},\n   {\"text\":'This is super unrelated as well',\"idd\":5,\"cluster\": 2},\n   {\"text\":'Test text',\"idd\":6,\"cluster\": 5}\n]\n\nstopwords = ['stop', 'the', 'to', 'and', 'a', 'in', 'it', 'is', 'I']\n\nmetric = metrics.AdjustedRand()\n\nmodel = compose.Pipeline(\n    feature_extraction.BagOfWords(lowercase=True, ngram_range=(1, 2), stop_words=stopwords),\n    cluster.TextClust(real_time_fading=False, fading_factor=0.001, tgap=100, auto_r=True,\n    radius=0.9)\n)\n\nfor x in corpus:\n    y_pred = model.predict_one(x[\"text\"])\n    y = x[\"cluster\"]\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x[\"text\"])\n\nprint(metric)\n
    AdjustedRand: -0.17647058823529413\n

    "},{"location":"api/cluster/TextClust/#methods","title":"Methods","text":"distances get_assignment get_macroclusters learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • t \u2014 defaults to None
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    microcluster predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None
    • type \u2014 defaults to micro

    Returns

    int: A cluster number.

    showclusters tfcontainer updateMacroClusters
    1. Assenmacher, D. and Trautmann, H. (2022). Textual One-Pass Stream Clustering with Automated Distance Threshold Adaption. In: Asian Conference on Intelligent Information and Database Systems (Accepted)\u00a0\u21a9

    2. Carnein, M., Assenmacher, D., Trautmann, H. (2017). Stream Clustering of Chat Messages with Applications to Twitch Streams. In: Advances in Conceptual Modeling. ER 2017.\u00a0\u21a9

    "},{"location":"api/compat/River2SKLClassifier/","title":"River2SKLClassifier","text":"

    Compatibility layer from River to scikit-learn for classification.

    "},{"location":"api/compat/River2SKLClassifier/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Classifier

    "},{"location":"api/compat/River2SKLClassifier/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y

    Returns

    self

    get_metadata_routing

    Get metadata routing of this object.

    Please check :ref:User Guide <metadata_routing> on how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y
    • classes \u2014 defaults to None

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted target values for each row of X.

    predict_proba

    Predicts the target probability of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted target values for each row of X.

    score

    Return the mean accuracy on the given test data and labels.

    In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.

    Parameters

    • X
    • y
    • sample_weight \u2014 defaults to None

    Returns

    float

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as :class:~sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    set_partial_fit_request

    Request metadata passed to the partial_fit method.

    Note that this method is only relevant if enable_metadata_routing=True (see :func:sklearn.set_config). Please see :ref:User Guide <metadata_routing> on how the routing mechanism works. The options for each parameter are: - True: metadata is requested, and passed to partial_fit if provided. The request is ignored if metadata is not provided. - False: metadata is not requested and the meta-estimator will not pass it to partial_fit. - None: metadata is not requested, and the meta-estimator will raise an error if the user provides it. - str: metadata should be passed to the meta-estimator with this given alias instead of the original name. The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. .. versionadded:: 1.3 .. note:: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a :class:pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • classes \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLClassifier: object

    set_score_request

    Request metadata passed to the score method.

    Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please check the User Guide on metadata routing to see how the routing mechanism works. The options for each parameter are:

    • True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
    • False: metadata is not requested and the meta-estimator will not pass it to score.
    • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
    • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

    The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. This mechanism was added in scikit-learn 1.3, and the method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • sample_weight \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLClassifier: object

    "},{"location":"api/compat/River2SKLClusterer/","title":"River2SKLClusterer","text":"

    Compatibility layer from River to scikit-learn for clustering.

    "},{"location":"api/compat/River2SKLClusterer/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Clusterer
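
    A brief hedged sketch (not from the original docs): river's KMeans used through scikit-learn's in-memory API.

    import numpy as np\nfrom river import cluster\nfrom river import compat\n\nnp.random.seed(42)\nX = np.random.random((10, 2))\n\nskl_clusterer = compat.River2SKLClusterer(\n    river_estimator=cluster.KMeans(n_clusters=2, seed=42)\n)\n\nlabels = skl_clusterer.fit_predict(X)  # one cluster label per row\n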

    "},{"location":"api/compat/River2SKLClusterer/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    fit_predict

    Performs clustering on X and returns cluster labels.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    ndarray of shape (n_samples,), dtype=np.int64

    get_metadata_routing

    Get metadata routing of this object.

    Please check the User Guide on metadata routing to see how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted cluster labels for each row of X.

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    "},{"location":"api/compat/River2SKLRegressor/","title":"River2SKLRegressor","text":"

    Compatibility layer from River to scikit-learn for regression.

    "},{"location":"api/compat/River2SKLRegressor/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Regressor
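
    A minimal usage sketch (illustrative only; scikit-learn's diabetes dataset is assumed for convenience):

    from river import compat\nfrom river import linear_model\nfrom sklearn import datasets\n\nX, y = datasets.load_diabetes(return_X_y=True)\n\nskl_model = compat.River2SKLRegressor(\n    river_estimator=linear_model.LinearRegression()\n)\n\nskl_model = skl_model.fit(X, y)\nr2 = skl_model.score(X, y)  # coefficient of determination, see score below\n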

    "},{"location":"api/compat/River2SKLRegressor/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y

    Returns

    self

    get_metadata_routing

    Get metadata routing of this object.

    Please check the User Guide on metadata routing to see how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    np.ndarray: Predicted target values for each row of X.

    score

    Return the coefficient of determination of the prediction.

    The coefficient of determination R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0.

    Parameters

    • X
    • y
    • sample_weight \u2014 defaults to None

    Returns

    float
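
    As a quick numeric illustration of the formula (with made-up values):

    import numpy as np\n\ny_true = np.array([3.0, 5.0, 7.0])\ny_pred = np.array([2.5, 5.0, 7.5])\n\nu = ((y_true - y_pred) ** 2).sum()         # 0.5\nv = ((y_true - y_true.mean()) ** 2).sum()  # 8.0\nr2 = 1 - u / v                             # 0.9375\n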

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    set_score_request

    Request metadata passed to the score method.

    Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please check the User Guide on metadata routing to see how the routing mechanism works. The options for each parameter are:

    • True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
    • False: metadata is not requested and the meta-estimator will not pass it to score.
    • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
    • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

    The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. This mechanism was added in scikit-learn 1.3, and the method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • sample_weight \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLRegressor: object

    "},{"location":"api/compat/River2SKLTransformer/","title":"River2SKLTransformer","text":"

    Compatibility layer from River to scikit-learn for transformation.

    "},{"location":"api/compat/River2SKLTransformer/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Transformer
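
    A minimal sketch (illustrative, wrapping river's StandardScaler):

    import numpy as np\nfrom river import compat\nfrom river import preprocessing\n\nnp.random.seed(42)\nX = np.random.random((5, 3))\n\nskl_scaler = compat.River2SKLTransformer(\n    river_estimator=preprocessing.StandardScaler()\n)\n\nXt = skl_scaler.fit_transform(X)  # scaled copy of X\n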

    "},{"location":"api/compat/River2SKLTransformer/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    fit_transform

    Fit to data, then transform it.

    Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X.

    Parameters

    • X
    • y \u2014 defaults to None
    • fit_params

    Returns

    ndarray of shape (n_samples, n_features_new)

    get_metadata_routing

    Get metadata routing of this object.

    Please check the User Guide on metadata routing to see how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    set_output

    Set output container.

    See the scikit-learn documentation for an example of how to use the set_output API.

    Parameters

    • transform \u2014 defaults to None

    Returns

    estimator instance

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    transform

    Transforms an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Transformed output.

    "},{"location":"api/compat/SKL2RiverClassifier/","title":"SKL2RiverClassifier","text":"

    Compatibility layer from scikit-learn to River for classification.

    "},{"location":"api/compat/SKL2RiverClassifier/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.ClassifierMixin

      A scikit-learn classifier which has a partial_fit method.

    • classes

      Type \u2192 list

    "},{"location":"api/compat/SKL2RiverClassifier/#examples","title":"Examples","text":"

    from river import compat\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import linear_model\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_breast_cancer(),\n    shuffle=True,\n    seed=42\n)\n\nmodel = preprocessing.StandardScaler()\nmodel |= compat.convert_sklearn_to_river(\n    estimator=linear_model.SGDClassifier(\n        loss='log_loss',\n        eta0=0.01,\n        learning_rate='constant'\n    ),\n    classes=[False, True]\n)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.198029\n

    "},{"location":"api/compat/SKL2RiverClassifier/#methods","title":"Methods","text":"learn_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_many predict_one

    Predict the label of a set of features x.

    Parameters

    • x

    Returns

    The predicted label.

    predict_proba_many predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/compat/SKL2RiverRegressor/","title":"SKL2RiverRegressor","text":"

    Compatibility layer from scikit-learn to River for regression.

    "},{"location":"api/compat/SKL2RiverRegressor/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.BaseEstimator

      A scikit-learn regressor which has a partial_fit method.

    "},{"location":"api/compat/SKL2RiverRegressor/#examples","title":"Examples","text":"

    from river import compat\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import linear_model\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_diabetes(),\n    shuffle=True,\n    seed=42\n)\n\nscaler = preprocessing.StandardScaler()\nsgd_reg = compat.convert_sklearn_to_river(linear_model.SGDRegressor())\nmodel = scaler | sgd_reg\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 84.501421\n

    "},{"location":"api/compat/SKL2RiverRegressor/#methods","title":"Methods","text":"learn_many learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_many predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/compat/convert-river-to-sklearn/","title":"convert_river_to_sklearn","text":"

    Wraps a river estimator to make it compatible with scikit-learn.

    "},{"location":"api/compat/convert-river-to-sklearn/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 base.Estimator
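
    As a short sketch (an assumption consistent with the wrappers documented above, not a verbatim example from the docs), the function picks the appropriate River2SKL* wrapper based on the estimator's type:

    from river import compat\nfrom river import linear_model\n\n# Wrapping a classifier should yield a River2SKLClassifier\nskl_model = compat.convert_river_to_sklearn(linear_model.LogisticRegression())\n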

    "},{"location":"api/compat/convert-sklearn-to-river/","title":"convert_sklearn_to_river","text":"

    Wraps a scikit-learn estimator to make it compatible with river.

    "},{"location":"api/compat/convert-sklearn-to-river/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.BaseEstimator

    • classes

      Type \u2192 list | None

      Default \u2192 None

      Class names necessary for classifiers.

    "},{"location":"api/compose/Discard/","title":"Discard","text":"

    Removes features.

    This can be used in a pipeline when you want to remove certain features. The transform_one method is pure, and therefore returns a new dictionary instead of removing the specified keys from the input.

    "},{"location":"api/compose/Discard/#parameters","title":"Parameters","text":"
    • keys

      Type \u2192 tuple[base.typing.FeatureName]

      Key(s) to discard.

    "},{"location":"api/compose/Discard/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12, 'c': 13}\ncompose.Discard('a', 'b').transform_one(x)\n
    {'c': 13}\n

    You can chain a discarder with any estimator in order to apply said estimator to the desired features.

    from river import feature_extraction as fx\n\nx = {'sales': 10, 'shop': 'Ikea', 'country': 'Sweden'}\n\npipeline = (\n    compose.Discard('shop', 'country') |\n    fx.PolynomialExtender()\n)\npipeline.transform_one(x)\n
    {'sales': 10, 'sales*sales': 100}\n

    "},{"location":"api/compose/Discard/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/FuncTransformer/","title":"FuncTransformer","text":"

    Wraps a function to make it usable in a pipeline.

    There is often a need to apply an arbitrary transformation to a set of features. For instance, this could involve parsing a date and then extracting the hour from said date. If you're processing a stream of data, then you can do this yourself by calling the necessary code at your leisure. On the other hand, if you want to do this as part of a pipeline, then you need to follow a simple convention.

    To use a function as part of a pipeline, it has to take a dict of features as input and output a dict. Once you have initialized this class with your function, you can use it like you would any other (unsupervised) transformer.

    It is up to you whether you want your function to be pure or not. By pure we refer to a function that doesn't modify its input. However, we recommend writing pure functions because this reduces the chances of inserting bugs into your pipeline.

    "},{"location":"api/compose/FuncTransformer/#parameters","title":"Parameters","text":"
    • func

      Type \u2192 typing.Callable[[dict], dict]

      A function that takes as input a dict and outputs a dict.

    "},{"location":"api/compose/FuncTransformer/#examples","title":"Examples","text":"

    from pprint import pprint\nimport datetime as dt\nfrom river import compose\n\nx = {'date': '2019-02-14'}\n\ndef parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    x['is_weekend'] = date.day in (5, 6)\n    x['hour'] = date.hour\n    return x\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'date': '2019-02-14', 'hour': 0, 'is_weekend': False}\n

    The above example is not pure because it modifies the input. The following example is pure and extracts the same new features:

    def parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    return {'is_weekend': date.day in (5, 6), 'hour': date.hour}\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'hour': 0, 'is_weekend': False}\n

    The previous example doesn't include the date feature because it returns a new dict. However, a common use case is to add a feature to an existing set of features. You can do this in a pure way by unpacking the input dict into the output dict:

    def parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    return {'is_weekend': date.day in (5, 6), 'hour': date.hour, **x}\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'date': '2019-02-14', 'hour': 0, 'is_weekend': False}\n

    You can add FuncTransformer to a pipeline just like you would with any other transformer.

    from river import naive_bayes\n\npipeline = compose.FuncTransformer(parse_date) | naive_bayes.MultinomialNB()\npipeline\n
    Pipeline (\n  FuncTransformer (\n    func=\"parse_date\"\n  ),\n  MultinomialNB (\n    alpha=1.\n  )\n)\n

    If you provide a function without wrapping it, then the pipeline will do it for you:

    pipeline = parse_date | naive_bayes.MultinomialNB()\n
    "},{"location":"api/compose/FuncTransformer/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    Many transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Grouper/","title":"Grouper","text":"

    Applies a transformer within different groups.

    This transformer allows you to split your data into groups and apply a transformer within each group. This happens in a streaming manner, which means that the groups are discovered online. A separate copy of the provided transformer is made whenever a new group appears. The groups are defined according to one or more keys.

    "},{"location":"api/compose/Grouper/#parameters","title":"Parameters","text":"
    • transformer

      Type \u2192 base.Transformer

    • by

      Type \u2192 base.typing.FeatureName | list[base.typing.FeatureName]

      The field on which to group the data. This can either be a single value or a list of values.
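
    A small illustrative sketch (not from the original docs; it assumes the sub-transformer receives the full feature dict, hence only numeric features are used): a separate StandardScaler is maintained per shop.

    from river import compose\nfrom river import preprocessing\n\ngrouper = compose.Grouper(\n    transformer=preprocessing.StandardScaler(),\n    by='shop'\n)\n\nfor x in [{'shop': 1, 'sales': 10}, {'shop': 2, 'sales': 20}, {'shop': 1, 'sales': 14}]:\n    grouper = grouper.learn_one(x)\n    scaled = grouper.transform_one(x)\n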

    "},{"location":"api/compose/Grouper/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Pipeline/","title":"Pipeline","text":"

    A pipeline of estimators.

    Pipelines allow you to chain different steps into a sequence. Typically, when doing supervised learning, a pipeline contains one or more transformation steps, whilst its last step is a regressor or a classifier. It is highly recommended to use pipelines with River. Indeed, in an online learning setting, it is very practical to have a model defined as a single object. Take a look at the user guide for further information and practical examples.

    One special thing to take notice of is the way transformers are handled. It is usual to predict something for a sample and wait for the ground truth to arrive. In such a scenario, the features are seen before the ground truth arrives. Therefore, the unsupervised parts of the pipeline are updated when predict_one and predict_proba_one are called. Usually the unsupervised parts of the pipeline are all the steps that precede the final step, which is a supervised model. However, some transformers are supervised and are therefore also updated during calls to learn_one.

    "},{"location":"api/compose/Pipeline/#parameters","title":"Parameters","text":"
    • steps

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/Pipeline/#examples","title":"Examples","text":"

    The recommended way to declare a pipeline is to use the | operator. The latter allows you to chain estimators in a very terse manner:

    from river import linear_model\nfrom river import preprocessing\n\nscaler = preprocessing.StandardScaler()\nlog_reg = linear_model.LinearRegression()\nmodel = scaler | log_reg\n

    This results in a pipeline that stores each step inside a dictionary.

    model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    You can access parts of a pipeline in the same manner as a dictionary:

    model['LinearRegression']\n
    LinearRegression (\n  optimizer=SGD (\n    lr=Constant (\n      learning_rate=0.01\n    )\n  )\n  loss=Squared ()\n  l2=0.\n  l1=0.\n  intercept_init=0.\n  intercept_lr=Constant (\n    learning_rate=0.01\n  )\n  clip_gradient=1e+12\n  initializer=Zeros ()\n)\n

    Note that you can also declare a pipeline by using the compose.Pipeline constructor method, which is slightly more verbose:

    from river import compose\n\nmodel = compose.Pipeline(scaler, log_reg)\n

    By using a compose.TransformerUnion, you can define complex pipelines that apply different steps to different parts of the data. For instance, we can extract word counts from text data, and extract polynomial features from numeric data.

    from river import feature_extraction as fx\n\ntfidf = fx.TFIDF('text')\ncounts = fx.BagOfWords('text')\ntext_part = compose.Select('text') | (tfidf + counts)\n\nnum_part = compose.Select('a', 'b') | fx.PolynomialExtender()\n\nmodel = text_part + num_part\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n

    The following shows an example of using debug_one to visualize how the information flows and changes throughout the pipeline.

    from river import compose\nfrom river import naive_bayes\n\ndataset = [\n    ('A positive comment', True),\n    ('A negative comment', False),\n    ('A happy comment', True),\n    ('A lovely comment', True),\n    ('A harsh comment', False)\n]\n\ntfidf = fx.TFIDF() | compose.Prefixer('tfidf_')\ncounts = fx.BagOfWords() | compose.Prefixer('count_')\nmnb = naive_bayes.MultinomialNB()\nmodel = (tfidf + counts) | mnb\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nx = dataset[0][0]\nreport = model.debug_one(dataset[0][0])\nprint(report)\n
    0. Input\n--------\nA positive comment\n1. Transformer union\n--------------------\n    1.0 TFIDF | Prefixer\n    --------------------\n    tfidf_comment: 0.43017 (float)\n    tfidf_positive: 0.90275 (float)\n    1.1 BagOfWords | Prefixer\n    -------------------------\n    count_comment: 1 (int)\n    count_positive: 1 (int)\ncount_comment: 1 (int)\ncount_positive: 1 (int)\ntfidf_comment: 0.43017 (float)\ntfidf_positive: 0.90275 (float)\n2. MultinomialNB\n----------------\nFalse: 0.19221\nTrue: 0.80779\n

    "},{"location":"api/compose/Pipeline/#methods","title":"Methods","text":"debug_one

    Displays the state of a set of features as it goes through the pipeline.

    Parameters

    • x \u2014 'dict'
    • show_types \u2014 defaults to True
    • n_decimals \u2014 defaults to 5

    forecast

    Return a forecast.

    Only works if each estimator has a transform_one method and the final estimator has a forecast method. This is the case of time series models from the time_series module.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_many

    Fit to a mini-batch.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None
    • params

    learn_one

    Fit to a single instance.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None
    • params

    predict_many

    Call transform_many, and then predict_many on the final step.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_one

    Call transform_one on the first steps and predict_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    predict_proba_many

    Call transform_many, and then predict_proba_many on the final step.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Call transform_one on the first steps and predict_proba_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    score_one

    Call transform_one on the first steps and score_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    transform_many

    Apply each transformer in the pipeline to some features.

    The final step in the pipeline will be applied if it is a transformer. If not, then it will be ignored and the output from the penultimate step will be returned. Note that the steps that precede the final step are assumed to all be transformers.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Apply each transformer in the pipeline to some features.

    The final step in the pipeline will be applied if it is a transformer. If not, then it will be ignored and the output from the penultimate step will be returned. Note that the steps that precede the final step are assumed to all be transformers.

    Parameters

    • x \u2014 'dict'
    • params

    "},{"location":"api/compose/Prefixer/","title":"Prefixer","text":"

    Prepends a prefix to feature names.

    "},{"location":"api/compose/Prefixer/#parameters","title":"Parameters","text":"
    • prefix

      Type \u2192 str

    "},{"location":"api/compose/Prefixer/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12}\ncompose.Prefixer('prefix_').transform_one(x)\n
    {'prefix_a': 42, 'prefix_b': 12}\n

    "},{"location":"api/compose/Prefixer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Renamer/","title":"Renamer","text":"

    Renames features following substitution rules.

    "},{"location":"api/compose/Renamer/#parameters","title":"Parameters","text":"
    • mapping

      Type \u2192 dict[str, str]

      Dictionary describing substitution rules. Keys in mapping that are not a feature's name are silently ignored.

    "},{"location":"api/compose/Renamer/#examples","title":"Examples","text":"

    from river import compose\n\nmapping = {'a': 'v', 'c': 'o'}\nx = {'a': 42, 'b': 12}\ncompose.Renamer(mapping).transform_one(x)\n
    {'b': 12, 'v': 42}\n

    "},{"location":"api/compose/Renamer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Select/","title":"Select","text":"

    Selects features.

    This can be used in a pipeline when you want to select certain features. The transform_one method is pure, and therefore returns a new dictionary instead of filtering the specified keys from the input.

    "},{"location":"api/compose/Select/#parameters","title":"Parameters","text":"
    • keys

      Type \u2192 tuple[base.typing.FeatureName]

      Key(s) to keep.

    "},{"location":"api/compose/Select/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12, 'c': 13}\ncompose.Select('c').transform_one(x)\n
    {'c': 13}\n

    You can chain a selector with any estimator in order to apply said estimator to the desired features.

    from river import feature_extraction as fx\n\nx = {'sales': 10, 'shop': 'Ikea', 'country': 'Sweden'}\n\npipeline = (\n    compose.Select('sales') |\n    fx.PolynomialExtender()\n)\npipeline.transform_one(x)\n
    {'sales': 10, 'sales*sales': 100}\n

    This transformer also supports mini-batch processing:

    import random\nfrom river import compose\n\nrandom.seed(42)\nX = [{\"x_1\": random.uniform(8, 12), \"x_2\": random.uniform(8, 12)} for _ in range(6)]\nfor x in X:\n    print(x)\n
    {'x_1': 10.557707193831535, 'x_2': 8.100043020890668}\n{'x_1': 9.100117273476478, 'x_2': 8.892842952595291}\n{'x_1': 10.94588485665605, 'x_2': 10.706797949691644}\n{'x_1': 11.568718270819382, 'x_2': 8.347755330517664}\n{'x_1': 9.687687278741082, 'x_2': 8.119188877752281}\n{'x_1': 8.874551899214413, 'x_2': 10.021421152413449}\n

    import pandas as pd\nX = pd.DataFrame.from_dict(X)\n

    You can then call transform_many to transform a mini-batch of features:

    compose.Select('x_2').transform_many(X)\n
        x_2\n0   8.100043\n1   8.892843\n2  10.706798\n3   8.347755\n4   8.119189\n5  10.021421\n

    "},{"location":"api/compose/Select/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    Many transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/SelectType/","title":"SelectType","text":"

    Selects features based on their type.

    This is practical when you want to apply different preprocessing steps to different kinds of features. For instance, a common use case is to apply a preprocessing.StandardScaler to numeric features and a preprocessing.OneHotEncoder to categorical features.

    "},{"location":"api/compose/SelectType/#parameters","title":"Parameters","text":"
    • types

      Type \u2192 tuple[type]

      Python types which you want to select. Under the hood, the built-in isinstance function is used to check if a value is of a given type.

    "},{"location":"api/compose/SelectType/#examples","title":"Examples","text":"
    import numbers\nfrom river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\nnum = compose.SelectType(numbers.Number) | preprocessing.StandardScaler()\ncat = compose.SelectType(str) | preprocessing.OneHotEncoder()\nmodel = (num + cat) | linear_model.LogisticRegression()\n
    "},{"location":"api/compose/SelectType/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Suffixer/","title":"Suffixer","text":"

    Appends a suffix to feature names.

    "},{"location":"api/compose/Suffixer/#parameters","title":"Parameters","text":"
    • suffix

      Type \u2192 str

    "},{"location":"api/compose/Suffixer/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12}\ncompose.Suffixer('_suffix').transform_one(x)\n
    {'a_suffix': 42, 'b_suffix': 12}\n

    "},{"location":"api/compose/Suffixer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do need to learn something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/TargetTransformRegressor/","title":"TargetTransformRegressor","text":"

    Modifies the target before training.

    The user is expected to check that func and inverse_func are coherent with each other.

    "},{"location":"api/compose/TargetTransformRegressor/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    • func

      Type \u2192 typing.Callable

      A function modifying the target before training.

    • inverse_func

      Type \u2192 typing.Callable

      A function mapping the transformed target back to its original space.

    "},{"location":"api/compose/TargetTransformRegressor/#examples","title":"Examples","text":"

    import math\nfrom river import compose\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    compose.TargetTransformRegressor(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15),\n        func=math.log,\n        inverse_func=math.exp\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 10.999752\n

    "},{"location":"api/compose/TargetTransformRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/compose/TransformerProduct/","title":"TransformerProduct","text":"

    Computes interactions between the outputs of a set of transformers.

    This is for when you want to add interaction terms between groups of features. It may also be used as an alternative to feature_extraction.PolynomialExtender when the latter is overkill.

    "},{"location":"api/compose/TransformerProduct/#parameters","title":"Parameters","text":"
    • transformers

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/TransformerProduct/#examples","title":"Examples","text":"

    Let's say we have a certain set of features with two groups. In practice these may be different namespaces, such as one for items and the other for users.

    x = dict(\n    a=0, b=1,  # group 1\n    x=2, y=3   # group 2\n)\n

    We might want to add interaction terms between groups ('a', 'b') and ('x', 'y'), as so:

    from pprint import pprint\nfrom river.compose import Select, TransformerProduct\n\nproduct = TransformerProduct(\n    Select('a', 'b'),\n    Select('x', 'y')\n)\npprint(product.transform_one(x))\n
    {'a*x': 0, 'a*y': 0, 'b*x': 2, 'b*y': 3}\n

    This can also be done with the following shorthand:

    product = Select('a', 'b') * Select('x', 'y')\npprint(product.transform_one(x))\n
    {'a*x': 0, 'a*y': 0, 'b*x': 2, 'b*y': 3}\n

    If you want to include the original terms, you can do something like this:

    group_1 = Select('a', 'b')\ngroup_2 = Select('x', 'y')\nproduct = group_1 + group_2 + group_1 * group_2\npprint(product.transform_one(x))\n
    {'a': 0, 'a*x': 0, 'a*y': 0, 'b': 1, 'b*x': 2, 'b*y': 3, 'x': 2, 'y': 3}\n

    "},{"location":"api/compose/TransformerProduct/#methods","title":"Methods","text":"learn_many

    Update each transformer.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None

    learn_one

    Update each transformer.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    transform_many

    Passes the data through each transformer and packs the results together.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Passes the data through each transformer and packs the results together.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/compose/TransformerUnion/","title":"TransformerUnion","text":"

    Packs multiple transformers into a single one.

    Pipelines allow you to apply steps sequentially. Therefore, the output of a step becomes the input of the next one. In many cases, you may want to pass the output of a step to multiple steps. This simple transformer allows you to do so. In other words, it enables you to apply particular steps to different parts of an input. A typical example is when you want to scale numeric features and one-hot encode categorical features.

    This transformer is essentially a list of transformers. Whenever it is updated, it loops through each transformer and updates them. Meanwhile, calling transform_one collects the output of each transformer and merges them into a single dictionary.

    "},{"location":"api/compose/TransformerUnion/#parameters","title":"Parameters","text":"
    • transformers

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/TransformerUnion/#examples","title":"Examples","text":"

    Take the following dataset:

    X = [\n    {'place': 'Taco Bell', 'revenue': 42},\n    {'place': 'Burger King', 'revenue': 16},\n    {'place': 'Burger King', 'revenue': 24},\n    {'place': 'Taco Bell', 'revenue': 58},\n    {'place': 'Burger King', 'revenue': 20},\n    {'place': 'Taco Bell', 'revenue': 50}\n]\n

    As an example, let's assume we want to compute two aggregates of a dataset. We therefore define two feature_extraction.Aggs and initialize a TransformerUnion with them:

    from river import compose\nfrom river import feature_extraction\nfrom river import stats\n\nmean = feature_extraction.Agg(\n    on='revenue', by='place',\n    how=stats.Mean()\n)\ncount = feature_extraction.Agg(\n    on='revenue', by='place',\n    how=stats.Count()\n)\nagg = compose.TransformerUnion(mean, count)\n

    We can now update each transformer and obtain their output with a single function call:

    from pprint import pprint\nfor x in X:\n    agg = agg.learn_one(x)\n    pprint(agg.transform_one(x))\n
    {'revenue_count_by_place': 1, 'revenue_mean_by_place': 42.0}\n{'revenue_count_by_place': 1, 'revenue_mean_by_place': 16.0}\n{'revenue_count_by_place': 2, 'revenue_mean_by_place': 20.0}\n{'revenue_count_by_place': 2, 'revenue_mean_by_place': 50.0}\n{'revenue_count_by_place': 3, 'revenue_mean_by_place': 20.0}\n{'revenue_count_by_place': 3, 'revenue_mean_by_place': 50.0}\n

    Note that you can use the + operator as a shorthand notation:

    agg = mean + count

    This allows you to build complex pipelines in a very terse manner. For instance, we can create a pipeline that scales each feature and fits a logistic regression as so:

    from river import linear_model as lm\nfrom river import preprocessing as pp\n\nmodel = (\n    (mean + count) |\n    pp.StandardScaler() |\n    lm.LogisticRegression()\n)\n

    Which is equivalent to the following code:

    model = compose.Pipeline(\n    compose.TransformerUnion(mean, count),\n    pp.StandardScaler(),\n    lm.LogisticRegression()\n)\n

    Note that you can access any part of a TransformerUnion by name:

    model['TransformerUnion']['Agg']\n
    Agg (\n    on=\"revenue\"\n    by=['place']\n    how=Mean ()\n)\n

    model['TransformerUnion']['Agg1']\n
    Agg (\n    on=\"revenue\"\n    by=['place']\n    how=Count ()\n)\n

    You can also manually provide a name for each step:

    agg = compose.TransformerUnion(\n    ('Mean revenue by place', mean),\n    ('# by place', count)\n)\n

    Mini-batch example:

    import pandas as pd\n\nX = pd.DataFrame([\n    {\"place\": 2, \"revenue\": 42},\n    {\"place\": 3, \"revenue\": 16},\n    {\"place\": 3, \"revenue\": 24},\n    {\"place\": 2, \"revenue\": 58},\n    {\"place\": 3, \"revenue\": 20},\n    {\"place\": 2, \"revenue\": 50},\n])\n

    Since we need a transformer with mini-batch support to demonstrate, we shall use a StandardScaler.

    from river import compose\nfrom river import preprocessing\n\nagg = (\n    compose.Select(\"place\") +\n    (compose.Select(\"revenue\") | preprocessing.StandardScaler())\n)\n\n_ = agg.learn_many(X)\nagg.transform_many(X)\n
       place   revenue\n0      2  0.441250\n1      3 -1.197680\n2      3 -0.693394\n3      2  1.449823\n4      3 -0.945537\n5      2  0.945537\n

    "},{"location":"api/compose/TransformerUnion/#methods","title":"Methods","text":"learn_many

    Update each transformer.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None

    learn_one

    Update each transformer.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    transform_many

    Passes the data through each transformer and packs the results together.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Passes the data through each transformer and packs the results together.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/compose/learn-during-predict/","title":"learn_during_predict","text":"

    A context manager for fitting unsupervised steps during prediction.

    Usually, unsupervised parts of a pipeline are updated during learn_one. However, in the case of online learning, it is possible to update them earlier, namely during the prediction step. This context manager allows you to do so.

    This usually brings a slight performance improvement, but it is not done by default because it is not intuitive and is more difficult to test. It also means that you have to call predict_one before learn_one in order for the whole pipeline to be updated.

    "},{"location":"api/compose/learn-during-predict/#examples","title":"Examples","text":"

    Let's first see what methods are called if we just call predict_one.

    import io\nimport logging\nfrom river import compose\nfrom river import datasets\nfrom river import linear_model\nfrom river import preprocessing\nfrom river import utils\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nclass_condition = lambda x: x.__class__.__name__ in ('StandardScaler', 'LinearRegression')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_one(x)\n\nprint(logs.getvalue())\n
    StandardScaler.transform_one\nLinearRegression.predict_one\n

    Now let's use the context manager and see what methods get called.

    logs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition), compose.learn_during_predict():\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_one(x)\n\nprint(logs.getvalue())\n
    StandardScaler.learn_one\nStandardScaler.transform_one\nLinearRegression.predict_one\n

    We can see that this time the scaler got updated before transforming the data.

    This also works with mini-batches.

    import pandas as pd\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_many(pd.DataFrame([x]))\nprint(logs.getvalue())\n
    StandardScaler.transform_many\nLinearRegression.predict_many\n

    logs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition), compose.learn_during_predict():\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_many(pd.DataFrame([x]))\nprint(logs.getvalue())\n
    StandardScaler.learn_many\nStandardScaler.transform_many\nLinearRegression.predict_many\n

    "},{"location":"api/conf/Interval/","title":"Interval","text":"

    An object to represent a (prediction) interval.

    Users are not expected to use this class as-is. Instead, they should use the with_interval parameter of the predict_one method of any regressor or classifier wrapped with a conformal prediction method.

    "},{"location":"api/conf/Interval/#parameters","title":"Parameters","text":"
    • lower

      Type \u2192 float

      The lower bound of the interval.

    • upper

      Type \u2192 float

      The upper bound of the interval.

    "},{"location":"api/conf/Interval/#attributes","title":"Attributes","text":"
    • center

      The center of the interval.

    • width

      The width of the interval.
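
    As a hedged illustration (constructing the object directly, which regular users normally wouldn't do), an interval supports membership tests alongside the attributes above:

    from river import conf\n\ninterval = conf.Interval(lower=4.0, upper=6.0)\n\n5.5 in interval  # True\ninterval.center  # 5.0\ninterval.width   # 2.0\n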

    "},{"location":"api/conf/RegressionJackknife/","title":"RegressionJackknife","text":"

    Jackknife method for regression.

    This is a conformal prediction method for regression. It is based on the jackknife method. The idea is to compute the quantiles of the residuals of the regressor. The prediction interval is then computed as the prediction of the regressor plus the quantiles of the residuals.

    This works naturally online, as the quantiles of the residuals are updated at each iteration. Each residual is produced before the regressor is updated, which ensures the predicted intervals are not optimistic.

    Note that the produced intervals are marginal and not conditional. This means that the intervals are not adjusted for the features x. This is a limitation of the jackknife method, which is nonetheless simple, efficient, and robust to outliers.

    "},{"location":"api/conf/RegressionJackknife/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regressor to be wrapped.

    • confidence_level

      Type \u2192 float

      Default \u2192 0.95

      The confidence level of the prediction intervals.

    • window_size

      Type \u2192 int | None

      Default \u2192 None

      The size of the window used to compute the quantiles of the residuals. If None, the quantiles are computed over the whole history. It is advised to set this if you expect the model's performance to change over time.

    "},{"location":"api/conf/RegressionJackknife/#examples","title":"Examples","text":"
    from river import conf\nfrom river import datasets\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\n\ndataset = datasets.TrumpApproval()\n\nmodel = conf.RegressionJackknife(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(intercept_lr=.1)\n    ),\n    confidence_level=0.9\n)\n\nvalidity = stats.Mean()\nefficiency = stats.Mean()\n\nfor x, y in dataset:\n    interval = model.predict_one(x, with_interval=True)\n    validity = validity.update(y in interval)\n    efficiency = efficiency.update(interval.width)\n    model = model.learn_one(x, y)\n

    The interval's validity is the proportion of times the true value is within the interval. We specified a confidence level of 90%, so we expect the validity to be around 90%.

    validity\n
    Mean: 0.939061\n

    The interval's efficiency is the average width of the intervals.

    efficiency\n
    Mean: 4.078361\n

    Lowering the confidence level will mechanically improve the efficiency.

    "},{"location":"api/conf/RegressionJackknife/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • with_interval \u2014 defaults to False
    • kwargs

    Returns

    The prediction.

    1. Barber, Rina Foygel, Emmanuel J. Candes, Aaditya Ramdas, and Ryan J. Tibshirani. \"Predictive inference with the jackknife+.\" The Annals of Statistics 49, no. 1 (2021): 486-507. \u21a9

    "},{"location":"api/covariance/EmpiricalCovariance/","title":"EmpiricalCovariance","text":"

    Empirical covariance matrix.

    "},{"location":"api/covariance/EmpiricalCovariance/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/covariance/EmpiricalCovariance/#attributes","title":"Attributes","text":"
    • matrix
    "},{"location":"api/covariance/EmpiricalCovariance/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import covariance\n\nnp.random.seed(42)\nX = pd.DataFrame(np.random.random((8, 3)), columns=[\"red\", \"green\", \"blue\"])\nX\n
            red     green      blue\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n5  0.183405  0.304242  0.524756\n6  0.431945  0.291229  0.611853\n7  0.139494  0.292145  0.366362\n

    cov = covariance.EmpiricalCovariance()\nfor x in X.to_dict(orient=\"records\"):\n    cov = cov.update(x)\ncov\n
            blue     green    red\n blue    0.076    0.020   -0.010\ngreen    0.020    0.113   -0.053\n  red   -0.010   -0.053    0.079\n

    There is also an update_many method to process mini-batches. The results are identical.

    cov = covariance.EmpiricalCovariance()\ncov = cov.update_many(X)\ncov\n
            blue     green    red\n blue    0.076    0.020   -0.010\ngreen    0.020    0.113   -0.053\n  red   -0.010   -0.053    0.079\n

    The covariances are stored in a dictionary, meaning any one of them can be accessed as such:

    cov[\"blue\", \"green\"]\n
    Cov: 0.020292\n

    Diagonal entries are variances:

    cov[\"blue\", \"blue\"]\n
    Var: 0.076119\n

    "},{"location":"api/covariance/EmpiricalCovariance/#methods","title":"Methods","text":"revert

    Downdate with a single sample.

    Parameters

    • x \u2014 'dict'

    update

    Update with a single sample.

    Parameters

    • x \u2014 'dict'

    update_many

    Update with a dataframe of samples.

    Parameters

    • X \u2014 'pd.DataFrame'

    "},{"location":"api/covariance/EmpiricalPrecision/","title":"EmpiricalPrecision","text":"

    Empirical precision matrix.

    The precision matrix is the inverse of the covariance matrix.

    This implementation leverages the Sherman-Morrison formula. The resulting inverse covariance matrix is not guaranteed to be identical to a batch computation. However, the difference shrinks with the number of observations.

    "},{"location":"api/covariance/EmpiricalPrecision/#attributes","title":"Attributes","text":"
    • matrix
    "},{"location":"api/covariance/EmpiricalPrecision/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import covariance\n\nnp.random.seed(42)\nX = pd.DataFrame(np.random.random((1000, 3)))\nX.head()\n
              0         1         2\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n

    prec = covariance.EmpiricalPrecision()\nfor x in X.to_dict(orient=\"records\"):\n    prec = prec.update(x)\n\nprec\n
        0        1        2\n0   12.026   -0.122   -0.214\n1   -0.122   11.276   -0.026\n2   -0.214   -0.026   11.632\n

    pd.DataFrame(np.linalg.inv(np.cov(X.T, ddof=1)))\n
               0          1          2\n0  12.159791  -0.124966  -0.218671\n1  -0.124966  11.393394  -0.026662\n2  -0.218671  -0.026662  11.756907\n

    "},{"location":"api/covariance/EmpiricalPrecision/#methods","title":"Methods","text":"update

    Update with a single sample.

    Parameters

    • x

    update_many

    Update with a dataframe of samples.

    Parameters

    • X \u2014 'pd.DataFrame'

    1. Online Estimation of the Inverse Covariance Matrix - Markus Thill \u21a9

    2. Fast rank-one updates to matrix inverse? - Tim Vieira \u21a9

    3. Woodbury matrix identity \u21a9

    "},{"location":"api/datasets/AirlinePassengers/","title":"AirlinePassengers","text":"

    Monthly number of international airline passengers.

    The stream contains 144 items and a single feature, which is the month. The goal is to predict the number of passengers each month by capturing the trend and the seasonality of the data.

    "},{"location":"api/datasets/AirlinePassengers/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/AirlinePassengers/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
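
    A minimal consumption sketch (illustrative; the dataset ships with the package and yields (features, target) pairs):

    from river import datasets\n\ndataset = datasets.AirlinePassengers()\nfor x, y in dataset.take(1):\n    pass  # x contains the month, y is the passenger count\n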

    1. International airline passengers: monthly totals in thousands. Jan 49 \u2013 Dec 60 \u21a9

    "},{"location":"api/datasets/Bananas/","title":"Bananas","text":"

    Bananas dataset.

    An artificial dataset where instances belong to several clusters with a banana shape. There are two attributes that correspond to the x and y axes, respectively.

    "},{"location":"api/datasets/Bananas/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/Bananas/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. OpenML page \u21a9

    "},{"location":"api/datasets/Bikes/","title":"Bikes","text":"

    Bike sharing station information from the city of Toulouse.

    The goal is to predict the number of bikes in 5 different bike stations from the city of Toulouse.

    "},{"location":"api/datasets/Bikes/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Bikes/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. A short introduction and conclusion to the OpenBikes 2016 Challenge \u21a9

    "},{"location":"api/datasets/ChickWeights/","title":"ChickWeights","text":"

    Chick weights over time.

    The stream contains 578 items and 3 features. The goal is to predict the weight of each chick over time, according to the diet the chick is on. The data is ordered by time and then by chick.

    "},{"location":"api/datasets/ChickWeights/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/ChickWeights/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Chick weight dataset overview \u21a9

    "},{"location":"api/datasets/CreditCard/","title":"CreditCard","text":"

    Credit card frauds.

    The dataset contains transactions made by credit cards in September 2013 by European cardholders. It covers transactions that occurred over two days, with 492 frauds out of 284,807 transactions. The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions.

    It contains only numerical input variables, which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. Feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and takes value 1 in case of fraud and 0 otherwise.
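
    As a hedged sketch of the class imbalance, the labels can be counted while streaming; the exact counts depend on how many samples are taken.

    import collections\nfrom river import datasets\n\ndataset = datasets.CreditCard()  # downloaded on first use\n\n# Count the class balance over the first samples; frauds are the 1 labels.\ncounts = collections.Counter(y for _, y in dataset.take(20_000))\nprint(counts)\n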

    "},{"location":"api/datasets/CreditCard/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/CreditCard/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\u00a0\u21a9

    2. Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\u00a0\u21a9

    3. Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\u00a0\u21a9

    4. Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\u00a0\u21a9

    5. Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Ael; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\u00a0\u21a9

    6. Carcillo, Fabrizio; Le Borgne, Yann-Ael; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\u00a0\u21a9

    7. Bertrand Lebichot, Yann-Ael Le Borgne, Liyun He, Frederic Oble, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\u00a0\u21a9

    8. Fabrizio Carcillo, Yann-Ael Le Borgne, Olivier Caelen, Frederic Oble, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019\u00a0\u21a9

    "},{"location":"api/datasets/Elec2/","title":"Elec2","text":"

    Electricity prices in New South Wales.

    This is a binary classification task, where the goal is to predict if the price of electricity will go up or down.

    This data was collected from the Australian New South Wales Electricity Market. In this market, prices are not fixed and are affected by demand and supply of the market. They are set every five minutes. Electricity transfers to/from the neighboring state of Victoria were done to alleviate fluctuations.
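
    As a sketch of how this task may be consumed, here is one way to run progressive validation; the scaler and logistic regression below are an arbitrary baseline, not a model prescribed by the dataset.

    from river import datasets, evaluate, linear_model, metrics, preprocessing\n\n# An arbitrary baseline: scale the features, then fit a logistic regression.\nmodel = preprocessing.StandardScaler() | linear_model.LogisticRegression()\n\n# Progressive validation: predict, score, then learn, one sample at a time.\nprint(evaluate.progressive_val_score(\n    dataset=datasets.Elec2(),  # downloaded on first use\n    model=model,\n    metric=metrics.Accuracy(),\n))\n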

    "},{"location":"api/datasets/Elec2/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Elec2/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. SPLICE-2 Comparative Evaluation: Electricity Pricing \u21a9

    2. DataHub description \u21a9

    "},{"location":"api/datasets/HTTP/","title":"HTTP","text":"

    HTTP dataset of the KDD 1999 cup.

    The goal is to predict whether or not an HTTP connection is anomalous. The dataset only contains 2,211 (0.4%) positive labels.
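
    Since this is an anomaly detection task, a hedged sketch of pairing the stream with a detector from river.anomaly follows; the detector and scaler choices are illustrative, not prescribed by the dataset.

    from river import anomaly, datasets, preprocessing\n\n# HalfSpaceTrees expects features in [0, 1], hence the scaler in front.\nscorer = preprocessing.MinMaxScaler() | anomaly.HalfSpaceTrees(seed=42)\n\nfor x, y in datasets.HTTP().take(100):  # downloaded on first use\n    score = scorer.score_one(x)  # higher means more anomalous\n    scorer.learn_one(x)\n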

    "},{"location":"api/datasets/HTTP/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/HTTP/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. HTTP (KDDCUP99) dataset \u21a9

    "},{"location":"api/datasets/Higgs/","title":"Higgs","text":"

    Higgs dataset.

    The data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22) are kinematic properties measured by the particle detectors in the accelerator. The last seven features are functions of the first 21 features; these are high-level features derived by physicists to help discriminate between the two classes.

    "},{"location":"api/datasets/Higgs/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Higgs/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/ImageSegments/","title":"ImageSegments","text":"

    Image segments classification.

    This dataset contains features that describe image segments into 7 classes: brickface, sky, foliage, cement, window, path, and grass.

    "},{"location":"api/datasets/ImageSegments/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/ImageSegments/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/Insects/","title":"Insects","text":"

    Insects dataset.

    This dataset has different variants, which are:

    • abrupt_balanced

    • abrupt_imbalanced

    • gradual_balanced

    • gradual_imbalanced

    • incremental-abrupt_balanced

    • incremental-abrupt_imbalanced

    • incremental-reoccurring_balanced

    • incremental-reoccurring_imbalanced

    • incremental_balanced

    • incremental_imbalanced

    • out-of-control

    The number of samples and the difficulty change from one variant to another. The number of classes is always the same (6), except for the last variant (24).

    "},{"location":"api/datasets/Insects/#parameters","title":"Parameters","text":"
    • variant

      Default \u2192 abrupt_balanced

      Indicates which variant of the dataset to load.
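
    A minimal sketch of loading a particular variant (the data is downloaded on first use):

    from river import datasets\n\ndataset = datasets.Insects(variant=\"abrupt_balanced\")\n\nfor x, y in dataset.take(1):\n    print(y)\n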

    "},{"location":"api/datasets/Insects/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Insects/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. USP DS repository \u21a9

    2. Souza, V., Reis, D.M.D., Maletzke, A.G. and Batista, G.E., 2020. Challenges in Benchmarking Stream Learning Algorithms with Real-world Data. arXiv preprint arXiv:2005.00113. \u21a9

    "},{"location":"api/datasets/Keystroke/","title":"Keystroke","text":"

    CMU keystroke dataset.

    Users are tasked to type in a password. The task is to determine which user is typing in the password.

    The only difference with the original dataset is that the \"sessionIndex\" and \"rep\" attributes have been dropped.

    "},{"location":"api/datasets/Keystroke/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Keystroke/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Keystroke Dynamics - Benchmark Data Set \u21a9

    "},{"location":"api/datasets/MaliciousURL/","title":"MaliciousURL","text":"

    Malicious URLs dataset.

    This dataset contains features about URLs that are classified as malicious or not.

    "},{"location":"api/datasets/MaliciousURL/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/MaliciousURL/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Detecting Malicious URLs \u21a9

    2. Identifying Suspicious URLs: An Application of Large-Scale Online Learning \u21a9

    "},{"location":"api/datasets/MovieLens100K/","title":"MovieLens100K","text":"

    MovieLens 100K dataset.

    MovieLens datasets were collected by the GroupLens Research Project at the University of Minnesota. This dataset consists of 100,000 ratings (1-5) from 943 users on 1682 movies. Each user has rated at least 20 movies. User and movie information are provided. The data was collected through the MovieLens web site (movielens.umn.edu) during the seven-month period from September 19th, 1997 through April 22nd, 1998.

    "},{"location":"api/datasets/MovieLens100K/#parameters","title":"Parameters","text":"
    • unpack_user_and_item

      Default \u2192 False

      Whether or not the user and item should be extracted from the context and included as extra keyword arguments.

    "},{"location":"api/datasets/MovieLens100K/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/MovieLens100K/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. The MovieLens Datasets: History and Context \u21a9

    "},{"location":"api/datasets/Music/","title":"Music","text":"

    Multi-label music mood prediction.

    The goal is to predict which kinds of moods a song pertains to.

    "},{"location":"api/datasets/Music/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Music/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Read, J., Reutemann, P., Pfahringer, B. and Holmes, G., 2016. MEKA: a multi-label/multi-target extension to WEKA. The Journal of Machine Learning Research, 17(1), pp.667-671. \u21a9

    "},{"location":"api/datasets/Phishing/","title":"Phishing","text":"

    Phishing websites.

    This dataset contains features from web pages that are classified as phishing or not.

    "},{"location":"api/datasets/Phishing/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/Phishing/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/Restaurants/","title":"Restaurants","text":"

    Data from the Kaggle Recruit Restaurants challenge.

    The goal is to predict the number of visitors in each of 829 Japanese restaurants over a period of roughly 16 weeks. The data is ordered by date and then by restaurant ID.

    "},{"location":"api/datasets/Restaurants/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Restaurants/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Recruit Restaurant Visitor Forecasting \u21a9

    "},{"location":"api/datasets/SMSSpam/","title":"SMSSpam","text":"

    SMS Spam Collection dataset.

    The data contains 5,574 items and 1 feature (i.e. the SMS body). Spam messages represent 13.4% of the dataset. The goal is to predict whether an SMS is spam or not.

    "},{"location":"api/datasets/SMSSpam/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/SMSSpam/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Almeida, T.A., Hidalgo, J.M.G. and Yamakami, A., 2011, September. Contributions to the study of SMS spam filtering: new collection and results. In Proceedings of the 11th ACM symposium on Document engineering (pp. 259-262). \u21a9

    "},{"location":"api/datasets/SMTP/","title":"SMTP","text":"

    SMTP dataset from the KDD 1999 cup.

    The goal is to predict whether or not an SMTP connection is anomalous. The dataset only contains 2,211 (0.4%) positive labels.

    "},{"location":"api/datasets/SMTP/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/SMTP/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. SMTP (KDDCUP99) dataset \u21a9

    "},{"location":"api/datasets/SolarFlare/","title":"SolarFlare","text":"

    Solar flare multi-output regression.

    "},{"location":"api/datasets/SolarFlare/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/SolarFlare/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/TREC07/","title":"TREC07","text":"

    TREC's 2007 Spam Track dataset.

    The data contains 75,419 chronologically ordered items, i.e. 3 months of emails delivered to a particular server in 2007. Spam messages represent 66.6% of the dataset. The goal is to predict whether an email is spam or not.

    The available raw features are: sender, recipients, date, subject, body.

    "},{"location":"api/datasets/TREC07/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/TREC07/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. TREC 2007 Spam Track Overview \u21a9

    2. Code ran to parse the dataset \u21a9

    "},{"location":"api/datasets/Taxis/","title":"Taxis","text":"

    Taxi ride durations in New York City.

    The goal is to predict the duration of taxi rides in New York City.

    "},{"location":"api/datasets/Taxis/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Taxis/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. New York City Taxi Trip Duration competition on Kaggle \u21a9

    "},{"location":"api/datasets/TrumpApproval/","title":"TrumpApproval","text":"

    Donald Trump approval ratings.

    This dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald Trump's approval ratings. It contains 5 features, which are approval ratings collected by 5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of this task is to see if we can reproduce FiveThirtyEight's model.

    "},{"location":"api/datasets/TrumpApproval/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/TrumpApproval/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Trump Approval Ratings \u21a9

    "},{"location":"api/datasets/WaterFlow/","title":"WaterFlow","text":"

    Water flow through a pipeline branch.

    The series includes hourly values for about 2 months, March 2022 to May 2022. The values are expressed in liters per second. There are four anomalous segments in the series:

    • 3 \"low value moments\": this is due to water losses or human intervention for maintenance * A small peak in the water inflow after the first 2 segments: this is due to a pumping operation into the main pipeline, when more water pressure is needed

    This dataset is well suited for time series forecasting models, as well as anomaly detection methods. Ideally, the goal is to build a time series forecasting model that is robust to the anomalous segments.

    This data has been kindly donated by the Tecnojest s.r.l. company (www.invidea.it) from Italy.

    "},{"location":"api/datasets/WaterFlow/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/WaterFlow/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/Dataset/","title":"Dataset","text":"

    Base class for all datasets.

    All datasets inherit from this class, be they stored in a file or generated on the fly.

    "},{"location":"api/datasets/base/Dataset/#parameters","title":"Parameters","text":"
    • task

      Type of task the dataset is meant for. Should be one of the following: \"Regression\", \"Binary classification\", \"Multi-class classification\", \"Multi-output binary classification\", \"Multi-output regression\".

    • n_features

      Number of features in the dataset.

    • n_samples

      Default \u2192 None

      Number of samples in the dataset.

    • n_classes

      Default \u2192 None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default \u2192 None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default \u2192 False

      Whether the dataset is sparse or not.

    "},{"location":"api/datasets/base/Dataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/base/Dataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/FileDataset/","title":"FileDataset","text":"

    Base class for datasets that are stored in a local file.

    Small datasets that are part of the river package inherit from this class.

    "},{"location":"api/datasets/base/FileDataset/#parameters","title":"Parameters","text":"
    • filename

      The file's name.

    • directory

      Default \u2192 None

      The directory where the file is contained. Defaults to the location of the datasets module.

    • desc

      Extra dataset parameters to pass as keyword arguments.

    "},{"location":"api/datasets/base/FileDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/base/FileDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/RemoteDataset/","title":"RemoteDataset","text":"

    Base class for datasets that are stored in a remote file.

    Medium and large datasets that are not part of the river package inherit from this class.

    The filename doesn't have to be provided if unpack is False; in that case, the filename will be inferred from the URL.

    "},{"location":"api/datasets/base/RemoteDataset/#parameters","title":"Parameters","text":"
    • url

      The URL the dataset is located at.

    • size

      The expected download size.

    • unpack

      Default \u2192 True

      Whether to unpack the download or not.

    • filename

      Default \u2192 None

      An optional name to give to the file if the file is unpacked.

    • desc

      Extra dataset parameters to pass as keyword arguments.

    "},{"location":"api/datasets/base/RemoteDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/base/RemoteDataset/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/SyntheticDataset/","title":"SyntheticDataset","text":"

    A synthetic dataset.

    "},{"location":"api/datasets/base/SyntheticDataset/#parameters","title":"Parameters","text":"
    • task

      Type of task the dataset is meant for. Should be one of: \"Regression\", \"Binary classification\", \"Multi-class classification\", \"Multi-output binary classification\", \"Multi-output regression\".

    • n_features

      Number of features in the dataset.

    • n_samples

      Default \u2192 None

      Number of samples in the dataset.

    • n_classes

      Default \u2192 None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default \u2192 None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default \u2192 False

      Whether the dataset is sparse or not.
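
    A hedged sketch of a custom generator built on this base class; the Squares class and its squared-target rule are made up for illustration, and take is inherited from the base class.

    from river import datasets\n\nclass Squares(datasets.base.SyntheticDataset):\n    # Hypothetical infinite stream where the target is the square of x.\n    def __init__(self):\n        super().__init__(task=\"Regression\", n_features=1)\n\n    def __iter__(self):\n        x = 0\n        while True:\n            yield {\"x\": x}, x ** 2\n            x += 1\n\nfor x, y in Squares().take(3):\n    print(x, y)\n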

    "},{"location":"api/datasets/base/SyntheticDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/base/SyntheticDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Agrawal/","title":"Agrawal","text":"

    Agrawal stream generator.

    The generator was introduced by Agrawal et al. 1, and was a common source of data for early work on scaling up decision tree learners. The generator produces a stream containing nine features, six numeric and three categorical. There are 10 functions defined for generating binary class labels from the features. Presumably these determine whether the loan should be approved. Classification functions are listed in the original paper 1.

    Feature | Description | Values
    --- | --- | ---
    salary | salary | uniformly distributed from 20k to 150k
    commission | commission | 0 if salary < 75k else uniformly distributed from 10k to 75k
    age | age | uniformly distributed from 20 to 80
    elevel | education level | uniformly chosen from 0 to 4
    car | car maker | uniformly chosen from 1 to 20
    zipcode | zip code of the town | uniformly chosen from 0 to 8
    hvalue | house value | uniformly distributed from 50k x zipcode to 100k x zipcode
    hyears | years house owned | uniformly distributed from 1 to 30
    loan | total loan amount | uniformly distributed from 0 to 500k

    "},{"location":"api/datasets/synth/Agrawal/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      The classification function to use for the generation. Valid values are from 0 to 9.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      If True, the class distribution will converge to a uniform distribution.

    • perturbation

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. Each new sample will be perturbed by the magnitude of perturbation. Valid values are in the range [0.0, 1.0].

    "},{"location":"api/datasets/synth/Agrawal/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Agrawal/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Agrawal(\n    classification_function=0,\n    seed=42\n)\n\ndataset\n
    Synthetic data generator\n<BLANKLINE>\n    Name  Agrawal\n    Task  Binary classification\n Samples  \u221e\nFeatures  9\n Outputs  1\n Classes  2\n  Sparse  False\n<BLANKLINE>\nConfiguration\n-------------\nclassification_function  0\n                   seed  42\n        balance_classes  False\n           perturbation  0.0\n

    for x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [103125.4837, 0, 21, 2, 8, 3, 319768.9642, 4, 338349.7437] 1\n[135983.3438, 0, 25, 4, 14, 0, 423837.7755, 7, 116330.4466] 1\n[98262.4347, 0, 55, 1, 18, 6, 144088.1244, 19, 139095.3541] 0\n[133009.0417, 0, 68, 1, 14, 5, 233361.4025, 7, 478606.5361] 1\n[63757.2908, 16955.9382, 26, 2, 12, 4, 522851.3093, 24, 229712.4398] 1\n

    "},{"location":"api/datasets/synth/Agrawal/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function randomly.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Agrawal/#notes","title":"Notes","text":"

    The sample generation works as follows: The 9 features are generated with the random generator, initialized with the seed passed by the user. Then, the classification function decides, as a function of all the attributes, whether to classify the instance as class 0 or class 1. The next step is to verify if the classes should be balanced, and if so, balance the classes. Finally, add noise if perturbation > 0.0.

    1. Rakesh Agrawal, Tomasz Imielinksi, and Arun Swami. \"Database Mining: A Performance Perspective\", IEEE Transactions on Knowledge and Data Engineering, 5(6), December 1993.\u00a0\u21a9\u21a9

    "},{"location":"api/datasets/synth/AnomalySine/","title":"AnomalySine","text":"

    Simulate a stream with anomalies in sine waves.

    The amount of data generated by this generator is finite.

    The data generated corresponds to sine and cosine functions. Anomalies are induced by replacing the cosine values with values from a different sine function. The contextual flag can be used to introduce contextual anomalies, which are values in the normal global range but abnormal compared to the seasonal pattern. Contextual anomalies are introduced by replacing cosine entries with sine values.

    The target indicates whether or not the instances are anomalous.

    "},{"location":"api/datasets/synth/AnomalySine/#parameters","title":"Parameters","text":"
    • n_samples

      Type \u2192 int

      Default \u2192 10000

      The number of samples to generate. This generator creates a batch of data affected by contextual anomalies and noise.

    • n_anomalies

      Type \u2192 int

      Default \u2192 2500

      Number of anomalies. Can't be larger than n_samples.

    • contextual

      Type \u2192 bool

      Default \u2192 False

      If True, will add contextual anomalies.

    • n_contextual

      Type \u2192 int

      Default \u2192 2500

      Number of contextual anomalies. Can't be larger than n_samples.

    • shift

      Type \u2192 int

      Default \u2192 4

      Shift in number of samples applied when retrieving contextual anomalies.

    • noise

      Type \u2192 float

      Default \u2192 0.5

      Amount of noise.

    • replace

      Type \u2192 bool

      Default \u2192 True

      If True, anomalies are randomly sampled with replacement.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/datasets/synth/AnomalySine/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/AnomalySine/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.AnomalySine(\n    seed=12345,\n    n_samples=100,\n    n_anomalies=25,\n    contextual=True,\n    n_contextual=10\n)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'sine': -0.7119, 'cosine': 0.8777} False\n{'sine': 0.8792, 'cosine': -0.0290} False\n{'sine': 0.0440, 'cosine': 3.0852} True\n{'sine': 0.5520, 'cosine': 3.4515} True\n{'sine': 0.8037, 'cosine': 0.4027} False\n

    "},{"location":"api/datasets/synth/AnomalySine/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/ConceptDriftStream/","title":"ConceptDriftStream","text":"

    Generates a stream with concept drift.

    A stream generator that adds concept drift or change by joining two streams. This is done by building a weighted combination of two pure distributions that characterize the target concepts before and after the change.

    The sigmoid function is an elegant and practical solution to define the probability that each new instance of the stream belongs to the new concept after the drift. The sigmoid function introduces a gradual, smooth transition whose duration is controlled with two parameters:

    • \\(p\\), the position of the change.

    • \\(w\\), the width of the transition.

    The sigmoid function at sample \\(t\\) is

    \\[f(t) = 1/(1+e^{-4(t-p)/w})\\]"},{"location":"api/datasets/synth/ConceptDriftStream/#parameters","title":"Parameters","text":"
    • stream

      Type \u2192 datasets.base.SyntheticDataset | None

      Default \u2192 None

      Original stream

    • drift_stream

      Type \u2192 datasets.base.SyntheticDataset | None

      Default \u2192 None

      Drift stream

    • position

      Type \u2192 int

      Default \u2192 5000

      Central position of the concept drift change.

    • width

      Type \u2192 int

      Default \u2192 1000

      Width of concept drift change.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • alpha

      Type \u2192 float | None

      Default \u2192 None

      Angle of change used to estimate the width of concept drift change. If set, it will override the width parameter. Valid values are in the range (0.0, 90.0].

    "},{"location":"api/datasets/synth/ConceptDriftStream/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/ConceptDriftStream/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.ConceptDriftStream(\n    stream=synth.SEA(seed=42, variant=0),\n    drift_stream=synth.SEA(seed=42, variant=1),\n    seed=1, position=5, width=2\n)\n\nfor x, y in dataset.take(10):\n    print(x, y)\n
    {0: 6.3942, 1: 0.2501, 2: 2.7502} False\n{0: 2.2321, 1: 7.3647, 2: 6.7669} True\n{0: 8.9217, 1: 0.8693, 2: 4.2192} True\n{0: 0.2979, 1: 2.1863, 2: 5.0535} False\n{0: 6.3942, 1: 0.2501, 2: 2.7502} False\n{0: 2.2321, 1: 7.3647, 2: 6.7669} True\n{0: 8.9217, 1: 0.8693, 2: 4.2192} True\n{0: 0.2979, 1: 2.1863, 2: 5.0535} False\n{0: 0.2653, 1: 1.9883, 2: 6.4988} False\n{0: 5.4494, 1: 2.2044, 2: 5.8926} False\n

    "},{"location":"api/datasets/synth/ConceptDriftStream/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/ConceptDriftStream/#notes","title":"Notes","text":"

    An optional way to estimate the width of the transition \\(w\\) is based on the angle \\(\\alpha\\): \\(w = 1/\\tan(\\alpha)\\). Since the width corresponds to the number of samples for the transition, the width is rounded to the nearest smaller integer. Notice that larger values of \\(\\alpha\\) result in smaller widths. For \\(\\alpha > 45.0\\), the width is smaller than 1, so values are rounded to 1 to avoid division by zero errors.
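
    The width rule can be made concrete with a small arithmetic sketch; the helper name is made up and this is not river's code.

    import math\n\ndef width_from_alpha(alpha):\n    # w = 1 / tan(alpha), with alpha in degrees, rounded to the nearest smaller integer.\n    w = int(1 / math.tan(math.radians(alpha)))\n    return max(w, 1)  # alpha > 45 would otherwise give a width below 1\n\nwidth_from_alpha(10.0)  # 1 / tan(10 degrees) is about 5.67, so 5\nwidth_from_alpha(60.0)  # 1 / tan(60 degrees) is about 0.58, clipped to 1\n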

    "},{"location":"api/datasets/synth/Friedman/","title":"Friedman","text":"

    Friedman synthetic dataset.

    Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1]. The target is defined by the following function:

    \\[y = 10 \\sin(\\pi x_0 x_1) + 20 (x_2 - 0.5)^2 + 10 x_3 + 5 x_4 + \\epsilon\\]

    In the last expression, \\(\\epsilon \\sim \\mathcal{N}(0, 1)\\) is the noise. Since the remaining features do not appear in the expression, only the first 5 features are relevant.

    "},{"location":"api/datasets/synth/Friedman/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Friedman/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Friedman/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Friedman(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90\n

    "},{"location":"api/datasets/synth/Friedman/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Friedman, J.H., 1991. Multivariate adaptive regression splines. The annals of statistics, pp.1-67. \u21a9

    "},{"location":"api/datasets/synth/FriedmanDrift/","title":"FriedmanDrift","text":"

    Friedman synthetic dataset with concept drifts.

    Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1]. Only the first 5 features are relevant. The target is defined by different functions depending on the type of the drift.

    The three available modes of operation of the data generator are described in 1.

    "},{"location":"api/datasets/synth/FriedmanDrift/#parameters","title":"Parameters","text":"
    • drift_type

      Type \u2192 str

      Default \u2192 lea

      The variant of concept drift.

      • 'lea': Local Expanding Abrupt drift. The concept drift appears in two distinct regions of the instance space, while the remaining regions are left unaltered. There are three points of abrupt change in the training dataset. At every consecutive change the regions of drift are expanded.

      • 'gra': Global Recurring Abrupt drift. The concept drift appears over the whole instance space. There are two points of concept drift. At the second point of drift the old concept reoccurs.

      • 'gsg': Global and Slow Gradual drift. The concept drift affects all of the instance space. However, the change is gradual rather than abrupt. After each of the two change points covered by this variant, and during a window of length transition_window, examples from both the old and the new concepts are generated with equal probability. After the transition period, only examples from the new concept are generated.

    • position

      Type \u2192 tuple[int, ...]

      Default \u2192 (50000, 100000, 150000)

      The number of monitored instances after which each concept drift occurs. A tuple with at least two elements must be passed, where each number is greater than the preceding one. If drift_type='lea', then the tuple must have three elements.

    • transition_window

      Type \u2192 int

      Default \u2192 10000

      The length of the transition window between two concepts. Only applicable when drift_type='gsg'. If set to zero, the drifts will be abrupt. Anytime transition_window > 0, it defines a window in which instances of the new concept are gradually introduced among the examples from the old concept. During this transition phase, both old and new concepts appear with equal probability.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/FriedmanDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/FriedmanDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.FriedmanDrift(\n    drift_type='lea',\n    position=(1, 2, 3),\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] -2.65\n

    dataset = synth.FriedmanDrift(\n    drift_type='gra',\n    position=(2, 3),\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.96\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90\n

    dataset = synth.FriedmanDrift(\n    drift_type='gsg',\n    position=(1, 4),\n    transition_window=2,\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.92\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 17.32\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 6.05\n

    "},{"location":"api/datasets/synth/FriedmanDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Ikonomovska, E., Gama, J. and D\u017eeroski, S., 2011. Learning model trees from evolving data streams. Data mining and knowledge discovery, 23(1), pp.128-168.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Hyperplane/","title":"Hyperplane","text":"

    Hyperplane stream generator.

    Generates a stream in which the task is to predict the class given by a rotating hyperplane. It was used as a testbed for CVFDT and VFDT in 1.

    A hyperplane in d-dimensional space is the set of points \\(x\\) that satisfy

    \\[\\sum^{d}_{i=1} w_i x_i = w_0 = \\sum^{d}_{i=1} w_i\\]

    where \\(x_i\\) is the i-th coordinate of \\(x\\).

    • Examples for which \\(\\sum^{d}_{i=1} w_i x_i > w_0\\), are labeled positive.

    • Examples for which \\(\\sum^{d}_{i=1} w_i x_i \\leq w_0\\), are labeled negative.

    Hyperplanes are useful for simulating time-changing concepts because we can change the orientation and position of the hyperplane in a smooth manner by changing the relative size of the weights. We introduce change to this dataset by adding drift to each weighted feature \\(w_i = w_i + d \\sigma\\), where \\(\\sigma\\) is the probability that the direction of change is reversed and \\(d\\) is the change applied to each example.
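
    Taken exactly as stated, the labeling rule boils down to a one-liner; this is a sketch with toy numbers, not the generator's code, which also handles noise and drift.

    def label(x, w):\n    # Positive iff the weighted sum exceeds w_0 = sum(w), per the rule above.\n    return 1 if sum(wi * xi for wi, xi in zip(w, x)) > sum(w) else 0\n\nlabel([1.0, 1.0], [0.2, 0.3])  # 0.5 > 0.5 is false, so 0\nlabel([2.0, 1.0], [0.2, 0.3])  # 0.7 > 0.5, so 1\n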

    "},{"location":"api/datasets/synth/Hyperplane/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of attributes to generate. Should be higher than 2.

    • n_drift_features

      Type \u2192 int

      Default \u2192 2

      The number of attributes with drift. Should be higher than 2.

    • mag_change

      Type \u2192 float

      Default \u2192 0.0

      Magnitude of the change for every example. From 0.0 to 1.0.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.05

      Percentage of noise to add to the data. From 0.0 to 1.0.

    • sigma

      Type \u2192 float

      Default \u2192 0.1

      Probability that the direction of change is reversed. From 0.0 to 1.0.

    "},{"location":"api/datasets/synth/Hyperplane/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Hyperplane/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Hyperplane(seed=42, n_features=2)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 0.2750, 1: 0.2232} 0\n{0: 0.0869, 1: 0.4219} 1\n{0: 0.0265, 1: 0.1988} 0\n{0: 0.5892, 1: 0.8094} 0\n{0: 0.3402, 1: 0.1554} 0\n

    "},{"location":"api/datasets/synth/Hyperplane/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Hyperplane/#notes","title":"Notes","text":"

    The sample generation works as follows: The features are generated with the random number generator, initialized with the seed passed by the user. Then the classification function decides, as a function of the sum of the weighted features and the sum of the weights, whether the instance belongs to class 0 or class 1. The last step is to add noise and generate drift.

    1. G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data streams. In KDD'01, pages 97-106, San Francisco, CA, 2001. ACM Press.\u00a0\u21a9

    "},{"location":"api/datasets/synth/LED/","title":"LED","text":"

    LED stream generator.

    This data source originates from the CART book 1. An implementation in C was donated to the UCI 2 machine learning repository by David Aha. The goal is to predict the digit displayed on a seven-segment LED display, where each attribute has a 10% chance of being inverted. It has an optimal Bayes classification rate of 74%. The particular configuration of the generator used for experiments (LED) produces 24 binary attributes, 17 of which are irrelevant.

    "},{"location":"api/datasets/synth/LED/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. For each new sample generated, a random number is drawn, and if it is less than or equal to noise_percentage, the LED value will be switched.

    • irrelevant_features

      Type \u2192 bool

      Default \u2192 False

      Adds 17 non-relevant attributes to the stream.

    "},{"location":"api/datasets/synth/LED/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/LED/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.LED(seed = 112, noise_percentage = 0.28, irrelevant_features= False)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1, 1: 0, 2: 1, 3: 0, 4: 0, 5: 1, 6: 0} 7\n{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 0} 8\n{0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 1, 6: 0} 9\n{0: 0, 1: 0, 2: 1, 3: 0, 4: 0, 5: 1, 6: 0} 1\n{0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0} 1\n

    "},{"location":"api/datasets/synth/LED/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/LED/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. If irrelevant_features is set then the total number of attributes will be 24; otherwise there will be 7 attributes.

    1. Leo Breiman, Jerome Friedman, R. Olshen, and Charles J. Stone. Classification and Regression Trees. Wadsworth and Brooks, Monterey, CA,1984.\u00a0\u21a9

    2. A. Asuncion and D. J. Newman. UCI Machine Learning Repository [http://www.ics.uci.edu/~mlearn/mlrepository.html]. University of California, Irvine, School of Information and Computer Sciences,2007.\u00a0\u21a9

    "},{"location":"api/datasets/synth/LEDDrift/","title":"LEDDrift","text":"

    LED stream generator with concept drift.

    This class is an extension of the LED generator whose purpose is to add concept drift to the stream.

    "},{"location":"api/datasets/synth/LEDDrift/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. For each new sample generated, a random number is drawn, and if it is less than or equal to noise_percentage, the LED value will be switched.

    • irrelevant_features

      Type \u2192 bool

      Default \u2192 False

      Adds 17 non-relevant attributes to the stream.

    • n_drift_features

      Type \u2192 int

      Default \u2192 0

      The number of attributes that have drift.

    "},{"location":"api/datasets/synth/LEDDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/LEDDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.LEDDrift(seed = 112, noise_percentage = 0.28,\n                         irrelevant_features= True, n_drift_features=4)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1] 7\n[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0] 6\n[0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1] 1\n[1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1] 6\n[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0] 7\n

    "},{"location":"api/datasets/synth/LEDDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/LEDDrift/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. If irrelevant_features is set then the total number of attributes will be 24; otherwise there will be 7 attributes.

    "},{"location":"api/datasets/synth/Logical/","title":"Logical","text":"

    Logical functions stream generator.

    Make a toy dataset with three labels that represent the logical functions: OR, XOR, AND (functions of the 2D input).

    Data is generated in 'tiles', each of which contains the complete set of results of the logical operations. The tiles are repeated n_tiles times. Optionally, the generated data can be shuffled.

    "},{"location":"api/datasets/synth/Logical/#parameters","title":"Parameters","text":"
    • n_tiles

      Type \u2192 int

      Default \u2192 1

      Number of tiles to generate.

    • shuffle

      Type \u2192 bool

      Default \u2192 True

      If set, generated data will be shuffled.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/datasets/synth/Logical/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Logical/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Logical(n_tiles=2, shuffle=True, seed=42)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'A': 1, 'B': 1} {'OR': 1, 'XOR': 0, 'AND': 1}\n{'A': 0, 'B': 0} {'OR': 0, 'XOR': 0, 'AND': 0}\n{'A': 1, 'B': 0} {'OR': 1, 'XOR': 1, 'AND': 0}\n{'A': 1, 'B': 1} {'OR': 1, 'XOR': 0, 'AND': 1}\n{'A': 1, 'B': 0} {'OR': 1, 'XOR': 1, 'AND': 0}\n

    "},{"location":"api/datasets/synth/Logical/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Mixed/","title":"Mixed","text":"

    Mixed data stream generator.

    This generator is an implementation of a data stream with abrupt concept drift and boolean noise-free examples as described in 1.

    It has four relevant attributes: two boolean attributes \\(v, w\\) and two numeric attributes \\(x, y\\) uniformly distributed from 0 to 1. The examples are labeled according to the classification function chosen from the two below.

    • function 0: if \\(v\\) and \\(w\\) are true or \\(v\\) and \\(z\\) are true or \\(w\\) and \\(z\\) are true then 0 else 1, where \\(z\\) is \\(y < 0.5 + 0.3 \\sin(3 \\pi x)\\)

    • function 1: The opposite of function 0.

    Concept drift can be introduced by changing the classification function. This can be done manually or using ConceptDriftStream.
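
    A hedged transcription of the two functions described above (not the generator's internals, which also handle class balancing):

    import math\n\ndef classify(v, w, x, y, function=0):\n    # Classification function 0 as described above; function 1 is its opposite.\n    z = y < 0.5 + 0.3 * math.sin(3 * math.pi * x)\n    label = 0 if (v and w) or (v and z) or (w and z) else 1\n    return label if function == 0 else 1 - label\n\nclassify(True, False, 0.1, 0.2)  # z holds here, so (v and z) is true and the label is 0\n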

    "},{"location":"api/datasets/synth/Mixed/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Which of the two classification functions to use for the generation. Valid options are 0 or 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    "},{"location":"api/datasets/synth/Mixed/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Mixed/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.Mixed(seed = 42, classification_function=1, balance_classes = True)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: True, 1: False, 2: 0.2750, 3: 0.2232} 1\n{0: False, 1: False, 2: 0.2186, 3: 0.5053} 0\n{0: False, 1: True, 2: 0.8094, 3: 0.0064} 1\n{0: False, 1: False, 2: 0.1010, 3: 0.2779} 0\n{0: True, 1: False, 2: 0.37018, 3: 0.2095} 1\n

    "},{"location":"api/datasets/synth/Mixed/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Mixed/#notes","title":"Notes","text":"

    The sample generation works as follows: The two numeric attributes are generated with the random generator initialized with the seed passed by the user (optional). The boolean attributes are either 0 or 1, based on whether a random number is above or below 0.5. The classification function then decides whether to classify the instance as class 0 or class 1. The next step is to verify if the classes should be balanced, and if so, balance the classes.

    The generated sample will have 4 relevant features and 1 label (it is a binary-classification task).

    1. Gama, Joao, et al. \"Learning with drift detection.\" Advances in artificial intelligence-SBIA 2004. Springer Berlin Heidelberg, 2004. 286-295\"\u00a0\u21a9

    "},{"location":"api/datasets/synth/Mv/","title":"Mv","text":"

    Mv artificial dataset.

    Artificial dataset composed of both nominal and numeric features, with co-dependencies between features. Originally described in 1.

    The features are generated using the following expressions:

    • \\(x_1\\): uniformly distributed over [-5, 5].

    • \\(x_2\\): uniformly distributed over [-15, -10].

    • \\(x_3\\):

      • if \\(x_1 > 0\\), \\(x_3 \\leftarrow\\) 'green'

      • else \\(x_3 \\leftarrow\\) 'red' with probability \\(0.4\\) and \\(x_3 \\leftarrow\\) 'brown' with probability \\(0.6\\).

    • \\(x_4\\):

      • if \\(x_3 =\\) 'green', \\(x_4 \\leftarrow x_1 + 2 x_2\\)

      • else \\(x_4 = \\frac{x_1}{2}\\) with probability \\(0.3\\) and \\(x_4 = \\frac{x_2}{2}\\) with probability \\(0.7\\).

    • \\(x_5\\): uniformly distributed over [-1, 1].

    • \\(x_6 \\leftarrow x_4 \\times \\epsilon\\), where \\(\\epsilon\\) is uniformly distributed over [0, 5].

    • \\(x_7\\): 'yes' with probability \\(0.3\\), and 'no' with probability \\(0.7\\).

    • \\(x_8\\): 'normal' if \\(x_5 < 0.5\\) else 'large'.

    • \\(x_9\\): uniformly distributed over [100, 500].

    • \\(x_{10}\\): uniformly distributed integer over the interval [1000, 1200].

    The target value is generated using the following rules:

    • if \\(x_2 > 2\\), \\(y \\leftarrow 35 - 0.5 x_4\\)

    • else if \\(-2 \\le x_4 \\le 2\\), \\(y \\leftarrow 10 - 2 x_1\\)

    • else if \\(x_7 =\\) 'yes', \\(y \\leftarrow 3 - \\frac{x_1}{x_4}\\)

    • else if \\(x_8 =\\) 'normal', \\(y \\leftarrow x_6 + x_1\\)

    • else \\(y \\leftarrow \\frac{x_1}{2}\\).
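
    As a check on the rules above, here is a direct, hedged transcription of the target function; it reproduces the first target of the example below.

    def mv_target(x1, x2, x4, x6, x7, x8):\n    # Target rule transcribed from the list above.\n    if x2 > 2:\n        return 35 - 0.5 * x4\n    if -2 <= x4 <= 2:\n        return 10 - 2 * x1\n    if x7 == \"yes\":\n        return 3 - x1 / x4\n    if x8 == \"normal\":\n        return x6 + x1\n    return x1 / 2\n\n# First sample of the example below: x2 <= 2, x4 outside [-2, 2], x7 = 'no', x8 = 'normal'.\nmv_target(x1=1.39, x2=-14.87, x4=-28.35, x6=-31.64, x7=\"no\", x8=\"normal\")  # -30.25\n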

    "},{"location":"api/datasets/synth/Mv/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Mv/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Mv/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Mv(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [1.39, -14.87, 'green', -28.35, -0.44, -31.64, 'no', 'normal', 370.67, 1178.43] -30.25\n[-4.13, -12.89, 'red', -2.06, 0.01, -0.27, 'yes', 'normal', 359.95, 1108.98] 1.00\n[-2.79, -12.05, 'brown', -1.39, 0.61, -4.87, 'no', 'large', 162.19, 1191.44] 15.59\n[-1.63, -14.53, 'red', -7.26, 0.20, -29.33, 'no', 'normal', 314.49, 1194.62] -30.96\n[-1.21, -12.23, 'brown', -6.11, 0.72, -17.66, 'no', 'large', 118.32, 1045.57] -0.60\n

    "},{"location":"api/datasets/synth/Mv/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Mv in Lu\u00eds Torgo regression datasets \u21a9

    "},{"location":"api/datasets/synth/Planes2D/","title":"Planes2D","text":"

    2D Planes synthetic dataset.

    This dataset is described in 1 and was adapted from 2. The features are generated using the following probabilities:

    \\[P(x_1 = -1) = P(x_1 = 1) = \\frac{1}{2}\\] \\[P(x_m = -1) = P(x_m = 0) = P(x_m = 1) = \\frac{1}{3}, m=2,\\ldots, 10\\]

    The target value is defined by the following rule:

    \\[\\text{if}~x_1 = 1, y \\leftarrow 3 + 3x_2 + 2x_3 + x_4 + \\epsilon\\] \\[\\text{if}~x_1 = -1, y \\leftarrow -3 + 3x_5 + 2x_6 + x_7 + \\epsilon\\]

    In these expressions, \\(\\epsilon \\sim \\mathcal{N}(0, 1)\\) is the noise term.
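
    The generation process is simple enough to sketch directly. The following is a hedged illustration of the rule above, not river's internal code:

    import random\n\nrng = random.Random(42)\n\n# Draw the features according to the documented probabilities.\nx = [rng.choice([-1, 1])] + [rng.choice([-1, 0, 1]) for _ in range(9)]\neps = rng.gauss(0, 1)  # Gaussian noise\nif x[0] == 1:\n    y = 3 + 3 * x[1] + 2 * x[2] + x[3] + eps\nelse:\n    y = -3 + 3 * x[4] + 2 * x[5] + x[6] + eps\n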

    "},{"location":"api/datasets/synth/Planes2D/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Planes2D/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Planes2D/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Planes2D(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [-1, -1, 1, 0, -1, -1, -1, 1, -1, 1] -9.07\n[1, -1, -1, -1, -1, -1, 1, 1, -1, 1] -4.25\n[-1, 1, 1, 1, 1, 0, -1, 0, 1, 0] -0.95\n[-1, 1, 0, 0, 0, -1, -1, 0, -1, -1] -6.10\n[1, -1, 0, 0, 1, 0, -1, 1, 0, 1] 1.60\n

    "},{"location":"api/datasets/synth/Planes2D/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. 2DPlanes in Lu\u00eds Torgo regression datasets \u21a9

    2. Breiman, L., Friedman, J., Stone, C.J. and Olshen, R.A., 1984. Classification and regression trees. CRC press.\u00a0\u21a9

    "},{"location":"api/datasets/synth/RandomRBF/","title":"RandomRBF","text":"

    Random Radial Basis Function generator.

    Produces a radial basis function stream. A number of centroids are generated, each with a random central position, a standard deviation, a class label, and a weight. A new sample is created by choosing one of the centroids at random, taking the weights into account, and offsetting the attributes in a random direction from the centroid's center. The offset length is drawn from a Gaussian distribution.

    This process creates a normally distributed hypersphere of samples around each centroid.
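
    The sampling step can be pictured with a minimal sketch. Here the centroid's centre, standard deviation, and label are hypothetical placeholders, and the code illustrates the description above rather than river's internal implementation:

    import math\nimport random\n\nrng = random.Random(42)\n\ncentre, std_dev, label = [0.5, 0.5], 0.1, 1  # hypothetical centroid\n\n# Offset the sample in a random direction from the centre; the offset\n# length is drawn from a Gaussian scaled by the centroid's deviation.\ndirection = [rng.gauss(0, 1) for _ in centre]\nnorm = math.sqrt(sum(d * d for d in direction))\nlength = rng.gauss(0, 1) * std_dev\nx = [c + d / norm * length for c, d in zip(centre, direction)]\n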

    "},{"location":"api/datasets/synth/RandomRBF/#parameters","title":"Parameters","text":"
    • seed_model

      Type \u2192 int | None

      Default \u2192 None

      Model's random seed to generate centroids.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Sample's random seed.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of class labels to generate.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of numerical features to generate.

    • n_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids to generate.

    "},{"location":"api/datasets/synth/RandomRBF/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomRBF/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.RandomRBF(seed_model=42, seed_sample=42,\n                          n_classes=4, n_features=4, n_centroids=20)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1.0989, 1: 0.3840, 2: 0.7759, 3: 0.6592} 2\n{0: 0.2366, 1: 1.3233, 2: 0.5691, 3: 0.2083} 0\n{0: 1.3540, 1: -0.3306, 2: 0.1683, 3: 0.8865} 0\n{0: 0.2585, 1: -0.2217, 2: 0.4739, 3: 0.6522} 0\n{0: 0.1295, 1: 0.5953, 2: 0.1774, 3: 0.6673} 1\n

    "},{"location":"api/datasets/synth/RandomRBF/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/RandomRBFDrift/","title":"RandomRBFDrift","text":"

    Random Radial Basis Function generator with concept drift.

    This class is an extension from the RandomRBF generator. Concept drift can be introduced in instances of this class.

    The drift is created by adding a \"speed\" to certain centroids. As samples are generated, each moving centroid's center is shifted by an amount determined by its speed.
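
    A minimal sketch of this drift mechanism, assuming a hypothetical per-centroid direction vector scaled by change_speed (not river's internal code):

    change_speed = 0.87\nspeed_direction = [0.6, -0.8]  # hypothetical unit direction\ncentre = [0.5, 0.5]\n\n# After each generated sample, every drifting centroid moves a little.\ncentre = [c + s * change_speed for c, s in zip(centre, speed_direction)]\n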

    "},{"location":"api/datasets/synth/RandomRBFDrift/#parameters","title":"Parameters","text":"
    • seed_model

      Type \u2192 int | None

      Default \u2192 None

      Model's random seed to generate centroids.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Sample's random seed.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of class labels to generate.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of numerical features to generate.

    • n_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids to generate.

    • change_speed

      Type \u2192 float

      Default \u2192 0.0

      The concept drift speed.

    • n_drift_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids that will drift.

    "},{"location":"api/datasets/synth/RandomRBFDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomRBFDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.RandomRBFDrift(seed_model=42, seed_sample=42,\n                               n_classes=4, n_features=4, n_centroids=20,\n                               change_speed=0.87, n_drift_centroids=10)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1.0989, 1: 0.3840, 2: 0.7759, 3: 0.6592} 2\n{0: 1.1496, 1: 1.9014, 2: 1.5393, 3: 0.3210} 0\n{0: 0.7146, 1: -0.2414, 2: 0.8933, 3: 1.6633} 0\n{0: 0.3797, 1: -0.1027, 2: 0.8717, 3: 1.1635} 0\n{0: 0.1295, 1: 0.5953, 2: 0.1774, 3: 0.6673} 1\n

    "},{"location":"api/datasets/synth/RandomRBFDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/RandomTree/","title":"RandomTree","text":"

    Random Tree generator.

    This generator is based on 1. The generator creates a random tree by splitting features at random and setting labels at its leaves.

    The tree structure is composed of node objects, each of which is either an inner node or a leaf node, depending on the parameters passed to the generator's initializer.

    Since the concepts are generated and classified according to a tree structure, this generator should, in theory, favor decision tree learners.

    "},{"location":"api/datasets/synth/RandomTree/#parameters","title":"Parameters","text":"
    • seed_tree

      Type \u2192 int | None

      Default \u2192 None

      Seed for random generation of tree.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Seed for random generation of instances.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of classes to generate.

    • n_num_features

      Type \u2192 int

      Default \u2192 5

      The number of numerical features to generate.

    • n_cat_features

      Type \u2192 int

      Default \u2192 5

      The number of categorical features to generate.

    • n_categories_per_feature

      Type \u2192 int

      Default \u2192 5

      The number of values to generate per categorical feature.

    • max_tree_depth

      Type \u2192 int

      Default \u2192 5

      The maximum depth of the tree concept.

    • first_leaf_level

      Type \u2192 int

      Default \u2192 3

      The first level of the tree above max_tree_depth that can have leaves.

    • fraction_leaves_per_level

      Type \u2192 float

      Default \u2192 0.15

      The fraction of leaves per level from first_leaf_level onwards.

    "},{"location":"api/datasets/synth/RandomTree/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomTree/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.RandomTree(seed_tree=42, seed_sample=42, n_classes=2,\n                           n_num_features=2, n_cat_features=2,\n                           n_categories_per_feature=2, max_tree_depth=6,\n                           first_leaf_level=3, fraction_leaves_per_level=0.15)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'x_num_0': 0.6394, 'x_num_1': 0.0250, 'x_cat_0': 1, 'x_cat_1': 0} 0\n{'x_num_0': 0.2232, 'x_num_1': 0.7364, 'x_cat_0': 0, 'x_cat_1': 1} 1\n{'x_num_0': 0.0317, 'x_num_1': 0.0936, 'x_cat_0': 0, 'x_cat_1': 0} 0\n{'x_num_0': 0.5612, 'x_num_1': 0.7160, 'x_cat_0': 1, 'x_cat_1': 0} 0\n{'x_num_0': 0.4492, 'x_num_1': 0.2781, 'x_cat_0': 0, 'x_cat_1': 0} 0\n

    "},{"location":"api/datasets/synth/RandomTree/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Domingos, Pedro, and Geoff Hulten. \"Mining high-speed data streams.\" In Proceedings of the sixth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 71-80. 2000.\u00a0\u21a9

    "},{"location":"api/datasets/synth/SEA/","title":"SEA","text":"

    SEA synthetic dataset.

    Implementation of the data stream with abrupt drift described in 1. Each observation is composed of 3 features, of which only the first two are relevant. The target is binary, and is positive if the sum of the two relevant features exceeds a certain threshold. There are 4 thresholds to choose from (see the sketch after this list). Concept drift can be introduced by switching the threshold at any time during the stream.

    • Variant 0: True if \\(att1 + att2 > 8\\)

    • Variant 1: True if \\(att1 + att2 > 9\\)

    • Variant 2: True if \\(att1 + att2 > 7\\)

    • Variant 3: True if \\(att1 + att2 > 9.5\\)
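
    A minimal sketch of the concept, with the thresholds taken from the variants listed above (not river's internal code):

    THRESHOLDS = {0: 8, 1: 9, 2: 7, 3: 9.5}\n\ndef sea_label(att1, att2, variant=0):\n    # Only the first two features matter; the third is irrelevant.\n    return att1 + att2 > THRESHOLDS[variant]\n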

    "},{"location":"api/datasets/synth/SEA/#parameters","title":"Parameters","text":"
    • variant

      Default \u2192 0

      Determines the classification function to use. Possible choices are 0, 1, 2, 3.

    • noise

      Default \u2192 0.0

    Determines the proportion of observations for which the target sign will be flipped.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/SEA/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/SEA/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.SEA(variant=0, seed=42)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 6.39426, 1: 0.25010, 2: 2.75029} False\n{0: 2.23210, 1: 7.36471, 2: 6.76699} True\n{0: 8.92179, 1: 0.86938, 2: 4.21921} True\n{0: 0.29797, 1: 2.18637, 2: 5.05355} False\n{0: 0.26535, 1: 1.98837, 2: 6.49884} False\n

    "},{"location":"api/datasets/synth/SEA/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. A Streaming Ensemble Algorithm (SEA) for Large-Scale Classification \u21a9

    "},{"location":"api/datasets/synth/STAGGER/","title":"STAGGER","text":"

    STAGGER concepts stream generator.

    This generator is an implementation of the data stream with abrupt concept drift described in 1.

    The STAGGER concepts are boolean functions f with three features describing objects: size (small, medium and large), shape (circle, square and triangle) and colour (red, blue and green).

    f options (see the sketch after this list):

    1. True if the size is small and the color is red.

    2. True if the color is green or the shape is a circle.

    3. True if the size is medium or large.
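
    A hedged sketch of the three concepts above, using string-valued attributes for readability:

    def stagger_concept(size, shape, colour, f=0):\n    # Sketch only; river encodes the attributes as integers.\n    if f == 0:\n        return size == 'small' and colour == 'red'\n    if f == 1:\n        return colour == 'green' or shape == 'circle'\n    return size in ('medium', 'large')\n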

    Concept drift can be introduced by changing the classification function. This can be done manually or using datasets.synth.ConceptDriftStream.

    One important feature is the possibility of balancing classes, in which case the class distribution tends towards uniform.

    "},{"location":"api/datasets/synth/STAGGER/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Classification functions to use. From 0 to 2.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    "},{"location":"api/datasets/synth/STAGGER/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/STAGGER/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.STAGGER(classification_function = 2, seed = 112,\n                     balance_classes = False)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'size': 1, 'color': 2, 'shape': 2} 1\n{'size': 2, 'color': 1, 'shape': 2} 1\n{'size': 1, 'color': 1, 'shape': 2} 1\n{'size': 0, 'color': 1, 'shape': 0} 0\n{'size': 2, 'color': 1, 'shape': 0} 1\n

    "},{"location":"api/datasets/synth/STAGGER/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function at random.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/STAGGER/#notes","title":"Notes","text":"

    The sample generation works as follows: The 3 attributes are generated with the random number generator. The classification function defines whether to classify the instance as class 0 or class 1. Finally, data is balanced, if this option is set by the user.

    1. Schlimmer, J. C., & Granger, R. H. (1986). Incremental learning from noisy data. Machine learning, 1(3), 317-354.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Sine/","title":"Sine","text":"

    Sine generator.

    This generator is an implementation of the data stream with abrupt concept drift described in Gama, Joao, et al. 1.

    It generates up to 4 numerical features that vary from 0 to 1, of which only 2 are relevant to the classification task; the other 2 can optionally be added as noise. A classification function is chosen among four options (see the sketch after this list):

    1. SINE1. Abrupt concept drift, noise-free examples. It has two relevant attributes. Each attribute has values uniformly distributed in [0, 1]. In the first context, all points below the curve \\(y = \\sin(x)\\) are classified as positive.

    2. Reversed SINE1. The reversed classification of SINE1.

    3. SINE2. The same two relevant attributes. The classification function is \\(y < 0.5 + 0.3 \\sin(3 \\pi x)\\).

    4. Reversed SINE2. The reversed classification of SINE2.
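
    A minimal sketch of the four functions listed above (not river's internal code):

    import math\n\ndef sine_label(x1, x2, function=0):\n    if function == 0:  # SINE1: positive below y = sin(x)\n        return x2 < math.sin(x1)\n    if function == 1:  # reversed SINE1\n        return x2 >= math.sin(x1)\n    if function == 2:  # SINE2\n        return x2 < 0.5 + 0.3 * math.sin(3 * math.pi * x1)\n    return x2 >= 0.5 + 0.3 * math.sin(3 * math.pi * x1)  # reversed SINE2\n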

    Concept drift can be introduced by changing the classification function. This can be done manually or using ConceptDriftStream.

    Two important features are the possibility of balancing classes, in which case the class distribution tends towards uniform, and the possibility of adding noise, which appends two non-relevant attributes.

    "},{"location":"api/datasets/synth/Sine/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Classification functions to use. From 0 to 3.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    • has_noise

      Type \u2192 bool

      Default \u2192 False

      Adds 2 non relevant features to the stream.

    "},{"location":"api/datasets/synth/Sine/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Sine/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Sine(classification_function = 2, seed = 112,\n                     balance_classes = False, has_noise = True)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 0.4812, 1: 0.6660, 2: 0.6198, 3: 0.6994} 1\n{0: 0.9022, 1: 0.7518, 2: 0.1625, 3: 0.2209} 0\n{0: 0.4547, 1: 0.3901, 2: 0.9629, 3: 0.7287} 0\n{0: 0.4683, 1: 0.3515, 2: 0.2273, 3: 0.6027} 0\n{0: 0.9238, 1: 0.1673, 2: 0.4522, 3: 0.3447} 0\n

    "},{"location":"api/datasets/synth/Sine/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function at random.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Sine/#notes","title":"Notes","text":"

    The sample generation works as follows: The two attributes are generated with the random number generator. The classification function defines whether to classify the instance as class 0 or class 1. Finally, data is balanced and noise is added, if these options are set by the user.

    The generated sample will have 2 relevant features, and an additional two noise features if has_noise is set.

    1. Gama, Joao, et al. \"Learning with drift detection.\" Advances in artificial intelligence-SBIA 2004. Springer Berlin Heidelberg, 2004. 286-295.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Waveform/","title":"Waveform","text":"

    Waveform stream generator.

    Generates samples with 21 numeric features and 3 classes, based on a random differentiation of some base waveforms. Noise can optionally be added, in which case the samples will have 40 features.

    "},{"location":"api/datasets/synth/Waveform/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • has_noise

      Type \u2192 bool

      Default \u2192 False

      Adds 19 unrelated features to the stream.

    "},{"location":"api/datasets/synth/Waveform/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Waveform/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Waveform(seed=42, has_noise=True)\n\nfor x, y in dataset:\n    break\n\nx\n
    {0: -0.0397, 1: -0.7484, 2: 0.2974, 3: 0.3574, 4: -0.0735, 5: -0.3647, 6: 1.5631,     7: 2.5291, 8: 4.1599, 9: 4.9587, 10: 4.52587, 11: 4.0097, 12: 3.6705, 13: 1.7033,     14: 1.4898, 15: 1.9743, 16: 0.0898, 17: 2.319, 18: 0.2552, 19: -0.4775, 20: -0.71339,     21: 0.3770, 22: 0.3671, 23: 1.6579, 24: 0.7828, 25: 0.5855, 26: -0.5807, 27: 0.7112,     28: -0.0271, 29: 0.2968, 30: -0.4997, 31: 0.1302, 32: 0.3578, 33: -0.1900, 34: -0.3771,     35: 1.3560, 36: 0.7124, 37: -0.6245, 38: 0.1346, 39: 0.3550}\n

    y\n
    2\n

    "},{"location":"api/datasets/synth/Waveform/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Waveform/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. The generator will randomly choose one of the hard coded waveforms, as well as random multipliers. For each feature, the actual value generated will be a combination of the hard coded functions, with the multipliers and a random value.

    If noise is added then the features 21 to 40 will be replaced with a random normal value.
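
    As a rough, hedged sketch of this scheme: the hat function below merely stands in for the hard-coded base waveforms (the real shapes and class pairings differ), and each feature mixes two of them with a random multiplier plus Gaussian noise:

    import random\n\nrng = random.Random(42)\n\ndef hat(i, centre, width=6):\n    # Stand-in for a hard-coded base waveform.\n    return max(0.0, width - abs(i - centre))\n\npairs = {0: (7, 11), 1: (11, 15), 2: (7, 15)}  # hypothetical pairings\ny = rng.choice([0, 1, 2])\nu = rng.random()  # random multiplier\na, b = pairs[y]\nx = {i: u * hat(i, a) + (1 - u) * hat(i, b) + rng.gauss(0, 1) for i in range(21)}\n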

    "},{"location":"api/drift/ADWIN/","title":"ADWIN","text":"

    Adaptive Windowing method for concept drift detection.

    ADWIN (ADaptive WINdowing) is a popular drift detection method with mathematical guarantees. ADWIN efficiently keeps a variable-length window of recent items, chosen such that there has been no change in the data distribution within it. This window is further divided into two sub-windows \\((W_0, W_1)\\) used to determine if a change has happened. ADWIN compares the averages of \\(W_0\\) and \\(W_1\\) to confirm that they correspond to the same distribution. Concept drift is detected if the distribution equality no longer holds. Upon detecting a drift, \\(W_0\\) is replaced by \\(W_1\\) and a new \\(W_1\\) is initialized. ADWIN uses a significance value \\(\\delta \\in (0, 1)\\) to determine if the two sub-windows correspond to the same distribution.
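
    To make the cut test concrete, here is a simplified, bucket-free illustration for values in [0, 1]; the real implementation compresses the window into exponential buckets, and the bound below follows one common form from the ADWIN paper rather than river's exact code:

    import math\n\ndef has_change(window, delta=0.002):\n    n = len(window)\n    for split in range(1, n):\n        w0, w1 = window[:split], window[split:]\n        n0, n1 = len(w0), len(w1)\n        m = 1 / (1 / n0 + 1 / n1)  # harmonic mean of sub-window sizes\n        eps_cut = math.sqrt(math.log(4 * n / delta) / (2 * m))\n        if abs(sum(w0) / n0 - sum(w1) / n1) > eps_cut:\n            return True  # the two sub-windows disagree: change\n    return False\n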

    "},{"location":"api/drift/ADWIN/#parameters","title":"Parameters","text":"
    • delta

      Default \u2192 0.002

      Significance value.

    • clock

      Default \u2192 32

      How often ADWIN should check for change. 1 means every new data point, default is 32. Higher values speed up processing, but may also lead to increased delay in change detection.

    • max_buckets

      Default \u2192 5

      The maximum number of buckets of each size that ADWIN should keep before merging buckets (default is 5).

    • min_window_length

      Default \u2192 5

      The minimum length of each subwindow (default is 5). Lower values may decrease delay in change detection but may also lead to more false positives.

    • grace_period

      Default \u2192 10

      ADWIN does not perform any change detection until at least this many data points have arrived (default is 10).

    "},{"location":"api/drift/ADWIN/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • estimation

      Estimate of mean value in the window.

    • n_detections

    • total

    • variance

    • width

      Window size

    "},{"location":"api/drift/ADWIN/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nadwin = drift.ADWIN()\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = adwin.update(val)\n    if adwin.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1023, input value: 4\n

    "},{"location":"api/drift/ADWIN/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Apart from adding the value to the window (by inserting it into the correct bucket), this method also updates the relevant statistics: the total sum of all values, the window width, and the total variance.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self

    1. Albert Bifet and Ricard Gavalda. \"Learning from time-changing data with adaptive windowing.\" In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448. Society for Industrial and Applied Mathematics, 2007.\u00a0\u21a9

    "},{"location":"api/drift/DriftRetrainingClassifier/","title":"DriftRetrainingClassifier","text":"

    Drift retraining classifier.

    This classifier is a wrapper for any classifier. It monitors the incoming data for concept drifts and warnings in the model's accuracy. If a warning is detected, a background model starts to train. If a drift is detected, the model is replaced by the background model, and the background model is reset.
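
    The control flow can be sketched as follows; this is a hedged outline of the behaviour described above, not river's exact code:

    def learn_one(model, background, detector, x, y):\n    # Feed the detector a binary error signal.\n    detector.update(model.predict_one(x) != y)\n    if detector.warning_detected and background is None:\n        background = model.clone()  # start a fresh background learner\n    if detector.drift_detected and background is not None:\n        model, background = background, None  # promote and reset\n    model.learn_one(x, y)\n    if background is not None:\n        background.learn_one(x, y)\n    return model, background\n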

    "},{"location":"api/drift/DriftRetrainingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier and background classifier class.

    • drift_detector

      Type \u2192 base.DriftAndWarningDetector | base.BinaryDriftAndWarningDetector | None

      Default \u2192 None

      Algorithm to track warnings and concept drifts. Attention! If the parameter train_in_background is True, the drift_detector must have a warning tracker.

    • train_in_background

      Type \u2192 bool

      Default \u2192 True

      Parameter to determine if a background model will be used.

    "},{"location":"api/drift/DriftRetrainingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import drift\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Elec2().take(3000)\n\nmodel = drift.DriftRetrainingClassifier(\n    model=tree.HoeffdingTreeClassifier(),\n    drift_detector=drift.binary.DDM()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 86.46%\n

    "},{"location":"api/drift/DriftRetrainingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/drift/DummyDriftDetector/","title":"DummyDriftDetector","text":"

    Baseline drift detector that generates pseudo drift detection signals.

    There are two approaches1:

    • fixed, where the drift signal is generated every t_0 samples.

    • random corresponds to a pseudo-random drift detection strategy.

    "},{"location":"api/drift/DummyDriftDetector/#parameters","title":"Parameters","text":"
    • trigger_method

      Type \u2192 str

      Default \u2192 fixed

      The trigger method to use. * fixed * random

    • t_0

      Type \u2192 int

      Default \u2192 300

      Reference point to define triggers.

    • w

      Type \u2192 int

      Default \u2192 0

      Auxiliary parameter whose purpose is twofold: - if trigger_method=\"fixed\", the periodic drift signals will only start after an initial warm-up period, randomly defined between [0, w]. This is useful to avoid all ensemble members being reset at the same time when periodic triggers are used as the adaptation strategy. - if trigger_method=\"random\", w defines the probability bounds of triggering a drift. The chance of triggering a drift is \\(0.5\\) after observing t_0 instances and becomes \\(1\\) after monitoring t_0 + w / 2 instances. A sigmoid function is used to produce values between [0, 1] that are used as the reset probabilities.

    • dynamic_cloning

      Type \u2192 bool

      Default \u2192 False

      Whether to change the seed and w values each time clone() is called.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/drift/DummyDriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/DummyDriftDetector/#examples","title":"Examples","text":"
    import random\nfrom river import drift\n\nrng = random.Random(42)\n

    The observed values will not affect the periodic triggers.

    data = [rng.gauss(0, 1) for _ in range(1000)]\n

    Let's start with the fixed drift signals:

    ptrigger = drift.DummyDriftDetector(t_0=500, seed=42)\nfor i, v in enumerate(data):\n    _ = ptrigger.update(v)\n    if ptrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 499.\nDrift detected at instance 999.\n

    Now, the random drift signals:

    rtrigger = drift.DummyDriftDetector(\n    trigger_method=\"random\",\n    t_0=500,\n    w=100,\n    dynamic_cloning=True,\n    seed=42\n)\nfor i, v in enumerate(data):\n    _ = rtrigger.update(v)\n    if rtrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 368.\nDrift detected at instance 817.\n

    Remember to set a w > 0 value if random triggers are used:

    try:\n    drift.DummyDriftDetector(trigger_method=\"random\")\nexcept ValueError as ve:\n    print(ve)\n
    The 'w' value must be greater than zero when 'trigger_method' is 'random'.\n

    Since we set dynamic_cloning to True, a clone of the periodic trigger will have its internal parameters changed:

    rtrigger = rtrigger.clone()\nfor i, v in enumerate(data):\n    _ = rtrigger.update(v)\n    if rtrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 429.\nDrift detected at instance 728.\n

    "},{"location":"api/drift/DummyDriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self

    "},{"location":"api/drift/DummyDriftDetector/#notes","title":"Notes","text":"

    When used in ensembles, a naive implementation of periodic drift signals would make all ensemble members reset at the same time. To avoid that, the dynamic_cloning parameter can be set to True. In this case, every time the clone method of this detector is called in an ensemble, a new seed is defined. If dynamic_cloning=True and trigger_method=\"fixed\", a new w between [0, t_0] will also be created for the new cloned instance.

    1. Heitor Gomes, Jacob Montiel, Saulo Martiello Mastelini, Bernhard Pfahringer, and Albert Bifet. On Ensemble Techniques for Data Stream Regression. IJCNN'20. International Joint Conference on Neural Networks. 2020.\u00a0\u21a9

    "},{"location":"api/drift/KSWIN/","title":"KSWIN","text":"

    Kolmogorov-Smirnov Windowing method for concept drift detection.

    "},{"location":"api/drift/KSWIN/#parameters","title":"Parameters","text":"
    • alpha

      Type \u2192 float

      Default \u2192 0.005

      Probability for the test statistic of the Kolmogorov-Smirnov-Test. The alpha parameter is very sensitive and should therefore be set below 0.01.

    • window_size

      Type \u2192 int

      Default \u2192 100

      Size of the sliding window.

    • stat_size

      Type \u2192 int

      Default \u2192 30

      Size of the statistic window.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • window

      Type \u2192 typing.Iterable | None

      Default \u2192 None

      Already collected data to avoid cold start.

    "},{"location":"api/drift/KSWIN/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/KSWIN/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nkswin = drift.KSWIN(alpha=0.0001, seed=42)\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = kswin.update(val)\n    if kswin.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1016, input value: 6\n

    "},{"location":"api/drift/KSWIN/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Adds an element on top of the sliding window and removes the oldest one from the window. Afterwards, the KS-test is performed.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self

    "},{"location":"api/drift/KSWIN/#notes","title":"Notes","text":"

    KSWIN (Kolmogorov-Smirnov Windowing) is a concept change detection method based on the Kolmogorov-Smirnov (KS) statistical test. The KS-test is a statistical test that makes no assumption about the underlying data distribution. KSWIN can monitor data or performance distributions. Note that the detector accepts one-dimensional inputs.

    KSWIN maintains a sliding window \\(\\Psi\\) of fixed size \\(n\\) (window_size). The last \\(r\\) (stat_size) samples of \\(\\Psi\\) are assumed to represent the last concept considered as \\(R\\). From the first \\(n-r\\) samples of \\(\\Psi\\), \\(r\\) samples are uniformly drawn, representing an approximated last concept \\(W\\).

    The KS-test is performed on the equally sized windows \\(R\\) and \\(W\\). It compares the distance between the two empirical cumulative distributions, \\(dist(R,W)\\).

    A concept drift is detected by KSWIN if:

    \\[ dist(R,W) > \\sqrt{-\\frac{\\ln \\alpha}{r}} \\]

    That is, the difference between the empirical distributions of \\(R\\) and \\(W\\) is too large for \\(R\\) and \\(W\\) to plausibly come from the same distribution.
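
    One full check can be sketched as below, assuming a filled window psi (a list of size n) and using scipy's two-sample KS test; river's implementation differs in its internals:

    import math\nimport random\n\nfrom scipy import stats\n\ndef kswin_check(psi, stat_size=30, alpha=0.005, rng=random.Random(42)):\n    r = stat_size\n    R = psi[-r:]  # the most recent samples\n    W = rng.sample(psi[:-r], r)  # uniform draw from the older part\n    dist = stats.ks_2samp(R, W).statistic\n    return dist > math.sqrt(-math.log(alpha) / r)\n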

    1. Christoph Raab, Moritz Heusinger, Frank-Michael Schleif, Reactive Soft Prototype Computing for Concept Drift Streams, Neurocomputing, 2020.\u00a0\u21a9

    "},{"location":"api/drift/PageHinkley/","title":"PageHinkley","text":"

    Page-Hinkley method for concept drift detection.

    This change detection method works by computing the observed values and their mean up to the current moment. Page-Hinkley does not signal warning zones, only change detections.

    This detector implements the CUSUM control chart for detecting changes. This implementation also supports the two-sided Page-Hinkley test to detect increasing and decreasing changes in the mean of the input values.
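
    A minimal one-sided (increase-only) sketch of the test; river's version adds the forgetting factor alpha, a two-sided mode, and resets after each detection:

    def page_hinkley(stream, delta=0.005, threshold=50.0):\n    mean, n, cusum, cusum_min = 0.0, 0, 0.0, 0.0\n    for i, x in enumerate(stream):\n        n += 1\n        mean += (x - mean) / n  # running mean\n        cusum += x - mean - delta  # CUSUM of deviations\n        cusum_min = min(cusum_min, cusum)\n        if cusum - cusum_min > threshold:\n            yield i  # drift signalled at index i\n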

    "},{"location":"api/drift/PageHinkley/#parameters","title":"Parameters","text":"
    • min_instances

      Type \u2192 int

      Default \u2192 30

      The minimum number of instances before detecting change.

    • delta

      Type \u2192 float

      Default \u2192 0.005

      The delta factor for the Page-Hinkley test.

    • threshold

      Type \u2192 float

      Default \u2192 50.0

      The change detection threshold (lambda).

    • alpha

      Type \u2192 float

      Default \u2192 0.9999

      The forgetting factor, used to weight the observed value and the mean.

    • mode

      Type \u2192 str

      Default \u2192 both

      Whether to consider increases (\"up\"), decreases (\"down\") or both (\"both\") when monitoring the fading mean.

    "},{"location":"api/drift/PageHinkley/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/PageHinkley/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nph = drift.PageHinkley()\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = ph.update(val)\n    if ph.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1006, input value: 5\n

    "},{"location":"api/drift/PageHinkley/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'numbers.Number'

    Returns

    DriftDetector: self

    1. E. S. Page. 1954. Continuous Inspection Schemes. Biometrika 41, 1/2 (1954), 100-115.\u00a0\u21a9

    2. Sebasti\u00e3o, R., & Fernandes, J. M. (2017, June). Supporting the Page-Hinkley test with empirical mode decomposition for change detection. In International Symposium on Methodologies for Intelligent Systems (pp. 492-498). Springer, Cham.\u00a0\u21a9

    "},{"location":"api/drift/binary/DDM/","title":"DDM","text":"

    Drift Detection Method.

    DDM (Drift Detection Method) is a concept change detection method based on the PAC learning model premise that the learner's error rate will decrease as the number of analysed samples increases, as long as the data distribution is stationary.

    If the algorithm detects an increase in the error rate that surpasses a calculated threshold, either a change is detected, or the algorithm warns the user that a change may occur in the near future; the latter state is called the warning zone.

    The detection threshold is calculated as a function of two statistics, obtained when \\((p_i + s_i)\\) is at its minimum:

    • \\(p_{min}\\): The minimum recorded error rate.

    • \\(s_{min}\\): The minimum recorded standard deviation.

    At instant \\(i\\), the detection algorithm uses:

    • \\(p_i\\): The error rate at instant \\(i\\).

    • \\(s_i\\): The standard deviation at instant \\(i\\).

    The conditions for entering the warning zone and detecting change are as follows [see implementation note below]:

    • if \\(p_i + s_i \\geq p_{min} + w_l * s_{min}\\) -> Warning zone

    • if \\(p_i + s_i \\geq p_{min} + d_l * s_{min}\\) -> Change detected

    In the above expressions, \\(w_l\\) and \\(d_l\\) represent, respectively, the warning and drift thresholds.
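
    Putting the statistics and thresholds together, a hedged sketch of the test (not river's exact code) looks like this:

    import math\n\nclass DDMSketch:\n    def __init__(self, w_l=2.0, d_l=3.0):\n        self.n, self.p = 0, 1.0\n        self.min_sum = self.p_min = self.s_min = float('inf')\n        self.w_l, self.d_l = w_l, d_l\n\n    def update(self, error):\n        self.n += 1\n        self.p += (error - self.p) / self.n  # error rate p_i\n        s = math.sqrt(self.p * (1 - self.p) / self.n)  # deviation s_i\n        if self.p + s < self.min_sum:  # record the stats at the minimum\n            self.min_sum, self.p_min, self.s_min = self.p + s, self.p, s\n        warning = self.p + s >= self.p_min + self.w_l * self.s_min\n        drift = self.p + s >= self.p_min + self.d_l * self.s_min\n        return warning, drift\n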

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    "},{"location":"api/drift/binary/DDM/#parameters","title":"Parameters","text":"
    • warm_start

      Type \u2192 int

      Default \u2192 30

      The minimum number of analyzed samples required before change can be detected. Warm start parameter for the drift detector.

    • warning_threshold

      Type \u2192 float

      Default \u2192 2.0

      Threshold to decide if the detector is in a warning zone. The default value corresponds to a 95\\% confidence level for the warning assessment.

    • drift_threshold

      Type \u2192 float

      Default \u2192 3.0

      Threshold to decide if a drift was detected. The default value corresponds to a 99\\% confidence level for the drift assessment.

    "},{"location":"api/drift/binary/DDM/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/DDM/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nddm = drift.binary.DDM()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = ddm.update(x)\n    if ddm.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if ddm.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 1084\nChange detected at index 1334\nWarning detected at index 1492\n

    "},{"location":"api/drift/binary/DDM/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Jo\u00e3o Gama, Pedro Medas, Gladys Castillo, Pedro Pereira Rodrigues: Learning with Drift Detection. SBIA 2004: 286-295\u00a0\u21a9

    "},{"location":"api/drift/binary/EDDM/","title":"EDDM","text":"

    Early Drift Detection Method.

    EDDM (Early Drift Detection Method) aims to improve the detection rate of gradual concept drift in DDM, while keeping a good performance against abrupt concept drift.

    This method works by keeping track of the average distance between two errors instead of only the error rate. For this, it is necessary to keep track of the running average distance and the running standard deviation, as well as the maximum distance and the maximum standard deviation.

    The algorithm works similarly to the DDM algorithm, by keeping track of statistics only. It works with the running average distance (\\(p_i'\\)) and the running standard deviation (\\(s_i'\\)), as well as \\(p'_{max}\\) and \\(s'_{max}\\), which are the values of \\(p_i'\\) and \\(s_i'\\) when \\((p_i' + 2 * s_i')\\) reaches its maximum.

    Like DDM, there are two threshold values that define the borderline between no change, warning zone, and drift detected. These are as follows:

    • if \\((p_i' + 2 * s_i') / (p'_{max} + 2 * s'_{max}) < \\alpha\\) -> Warning zone

    • if \\((p_i' + 2 * s_i') / (p'_{max} + 2 * s'_{max}) < \\beta\\) -> Change detected

    \\(\\alpha\\) and \\(\\beta\\) are set to 0.95 and 0.9, respectively.
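
    The resulting test reduces to a ratio check, sketched here under the assumption that the running statistics are tracked elsewhere (not river's exact code):

    def eddm_status(p_i, s_i, p_max, s_max, alpha=0.95, beta=0.9):\n    ratio = (p_i + 2 * s_i) / (p_max + 2 * s_max)\n    if ratio < beta:\n        return 'drift'\n    if ratio < alpha:\n        return 'warning'\n    return 'in control'\n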

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    "},{"location":"api/drift/binary/EDDM/#parameters","title":"Parameters","text":"
    • warm_start

      Type \u2192 int

      Default \u2192 30

      The minimum number of monitored errors/failures required before change can be detected. Warm start parameter for the drift detector.

    • alpha

      Type \u2192 float

      Default \u2192 0.95

      Threshold for triggering a warning. Must be between 0 and 1. The smaller the value, the more conservative the detector becomes.

    • beta

      Type \u2192 float

      Default \u2192 0.9

      Threshold for triggering a drift. Must be between 0 and 1. The smaller the value, the more conservative the detector becomes.

    "},{"location":"api/drift/binary/EDDM/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/EDDM/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\neddm = drift.binary.EDDM(alpha=0.8, beta=0.75)\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = eddm.update(x)\n    if eddm.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if eddm.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 1059\nChange detected at index 1278\n

    "},{"location":"api/drift/binary/EDDM/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Early Drift Detection Method. Manuel Baena-Garcia, Jose Del Campo-Avila, Ra\u00fal Fidalgo, Albert Bifet, Ricard Gavalda, Rafael Morales-Bueno. In Fourth International Workshop on Knowledge Discovery from Data Streams, 2006.\u00a0\u21a9

    "},{"location":"api/drift/binary/HDDM-A/","title":"HDDM_A","text":"

    Drift Detection Method based on Hoeffding's bounds with moving average-test.

    HDDM_A is a drift detection method based on Hoeffding's inequality, which uses the input average as its estimator.
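
    For intuition, Hoeffding's inequality bounds how far the mean of n bounded values can stray from its expectation at a given confidence level; HDDM_A compares average estimates using bounds of this shape. A sketch, assuming values in [0, 1]:

    import math\n\ndef hoeffding_bound(n, confidence=0.001):\n    # Deviation allowed for a mean of n values in [0, 1].\n    return math.sqrt(math.log(1 / confidence) / (2 * n))\n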

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    Implementation based on MOA.

    "},{"location":"api/drift/binary/HDDM-A/#parameters","title":"Parameters","text":"
    • drift_confidence

      Default \u2192 0.001

      Confidence level for drift detection.

    • warning_confidence

      Default \u2192 0.005

      Confidence level for warning detection.

    • two_sided_test

      Default \u2192 False

      If True, will monitor error increments and decrements (two-sided). By default will only monitor increments (one-sided).

    "},{"location":"api/drift/binary/HDDM-A/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/HDDM-A/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nhddm_a = drift.binary.HDDM_A()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = hddm_a.update(x)\n    if hddm_a.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if hddm_a.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 451\nChange detected at index 1206\n

    "},{"location":"api/drift/binary/HDDM-A/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Fr\u00edas-Blanco I, del Campo-\u00c1vila J, Ramos-Jimenez G, et al. Online and non-parametric drift detection methods based on Hoeffding's bounds. IEEE Transactions on Knowledge and Data Engineering, 2014, 27(3): 810-823.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/drift/binary/HDDM-W/","title":"HDDM_W","text":"

    Drift Detection Method based on Hoeffding's bounds with moving weighted average-test.

    HDDM_W is an online drift detection method based on McDiarmid's bounds. HDDM_W uses the Exponentially Weighted Moving Average (EWMA) statistic as estimator.

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    Implementation based on MOA.

    "},{"location":"api/drift/binary/HDDM-W/#parameters","title":"Parameters","text":"
    • drift_confidence

      Default \u2192 0.001

      Confidence level for drift detection.

    • warning_confidence

      Default \u2192 0.005

      Confidence level for warning detection.

    • lambda_val

      Default \u2192 0.05

      The weight given to recent data. Smaller values mean less weight given to recent data.

    • two_sided_test

      Default \u2192 False

      If True, will monitor error increments and decrements (two-sided). By default will only monitor increments (one-sided).

    "},{"location":"api/drift/binary/HDDM-W/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/HDDM-W/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nhddm_w = drift.binary.HDDM_W()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = hddm_w.update(x)\n    if hddm_w.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if hddm_w.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 451\nChange detected at index 1077\n

    "},{"location":"api/drift/binary/HDDM-W/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Fr\u00edas-Blanco I, del Campo-\u00c1vila J, Ramos-Jimenez G, et al. Online and non-parametric drift detection methods based on Hoeffding\u2019s bounds. IEEE Transactions on Knowledge and Data Engineering, 2014, 27(3): 810-823.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/drift/datasets/AirlinePassengers/","title":"AirlinePassengers","text":"

    JFK Airline Passengers

    This dataset gives the number of passengers arriving and departing at JFK. The data is obtained from New York State's official Kaggle page for this dataset.

    "},{"location":"api/drift/datasets/AirlinePassengers/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/AirlinePassengers/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.kaggle.com/new-york-state/nys-air-passenger-traffic,-port-authority-of-ny-nj#air-passenger-traffic-per-month-port-authority-of-ny-nj-beginning-1977.csv\u00a0\u21a9

    "},{"location":"api/drift/datasets/Apple/","title":"Apple","text":"

    Apple Stock

    This dataset concerns the daily close price and volume of Apple stock around the year 2000. The dataset is sampled every 3 observations to reduce the length of the time series. This dataset is retrieved from Yahoo Finance.

    "},{"location":"api/drift/datasets/Apple/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Apple/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://finance.yahoo.com/quote/AAPL/history?period1=850348800&period2=1084579200&interval=1d&filter=history&frequency=1d\u00a0\u21a9

    "},{"location":"api/drift/datasets/Bitcoin/","title":"Bitcoin","text":"

    Bitcoin Market Price

    This is a regression task, where the goal is to predict the average USD market price across major bitcoin exchanges. This data was collected from the official Blockchain website. There is only one feature given, the day of exchange, which is in increments of three. The first 500 lines have been removed because they are not interesting.

    "},{"location":"api/drift/datasets/Bitcoin/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Bitcoin/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.blockchain.com/fr/explorer/charts/market-price?timespan=all\u00a0\u21a9

    "},{"location":"api/drift/datasets/BrentSpotPrice/","title":"BrentSpotPrice","text":"

    Brent Spot Price

    This is the USD price for Brent Crude oil, measured daily. We include the time series from 2000 onwards. The data is sampled at every 10 original observations to reduce the length of the series.

    The data is obtained from the U.S. Energy Information Administration. Since the data is in the public domain, we distribute it as part of this repository.

    Since the original data has observations only on trading days, there are arguably gaps in this time series (on non-trading days). However, we consider these observations to be consecutive, and thus also consider the sampled time series to have consecutive observations.

    "},{"location":"api/drift/datasets/BrentSpotPrice/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/BrentSpotPrice/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. U.S. Energy Information Administration (Sep. 2019)\u00a0\u21a9

    2. https://www.eia.gov/opendata/v1/qb.php?sdid=PET.RBRTE.D\u00a0\u21a9

    "},{"location":"api/drift/datasets/Occupancy/","title":"Occupancy","text":"

    Room occupancy data.

    Dataset on detecting room occupancy based on several variables. The dataset contains temperature, humidity, light, and CO2 variables.

    The data is sampled at every 16 observations to reduce the length of the series.

    "},{"location":"api/drift/datasets/Occupancy/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Occupancy/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    Candanedo, Luis M., and V\u00e9ronique Feldheim. \"Accurate occupancy detection of an office room from light, temperature, humidity and CO2 measurements using statistical learning models.\" Energy and Buildings 112 (2016): 28-39.

    "},{"location":"api/drift/datasets/RunLog/","title":"RunLog","text":"

    Interval Training Running Pace.

    This dataset shows the pace of a runner during an interval training session, where a mobile application provides instructions on when to run and when to walk.

    "},{"location":"api/drift/datasets/RunLog/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/RunLog/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/drift/datasets/UKCoalEmploy/","title":"UKCoalEmploy","text":"

    Historic Employment in UK Coal Mines

    This is historic data obtained from the UK government. We use the employment column for the number of workers employed in the British coal mines. Missing values in the data are replaced with the value of the preceding year.

    "},{"location":"api/drift/datasets/UKCoalEmploy/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/UKCoalEmploy/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.gov.uk/government/statistical-data-sets/historical-coal-data-coal-production-availability-and-consumption\u00a0\u21a9

    "},{"location":"api/dummy/NoChangeClassifier/","title":"NoChangeClassifier","text":"

    Dummy classifier which returns the last class seen.

    The predict_one method will output the last class seen whilst predict_proba_one will return 1 for the last class seen and 0 for the others.

    "},{"location":"api/dummy/NoChangeClassifier/#attributes","title":"Attributes","text":"
    • last_class

      The last class seen.

    • classes

      The set of classes seen.

    "},{"location":"api/dummy/NoChangeClassifier/#examples","title":"Examples","text":"

    Taken from example 2.1 from this page.

    import pprint\nfrom river import dummy\n\nsentences = [\n    ('glad happy glad', '+'),\n    ('glad glad joyful', '+'),\n    ('glad pleasant', '+'),\n    ('miserable sad glad', '\u2212')\n]\n\nmodel = dummy.NoChangeClassifier()\n\nfor sentence, label in sentences:\n    model = model.learn_one(sentence, label)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    '\u2212'\n

    pprint.pprint(model.predict_proba_one(new_sentence))\n
    {'+': 0, '\u2212': 1}\n

    "},{"location":"api/dummy/NoChangeClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/dummy/PriorClassifier/","title":"PriorClassifier","text":"

    Dummy classifier which uses the prior distribution.

    The predict_one method will output the most common class whilst predict_proba_one will return the normalized class counts.

    "},{"location":"api/dummy/PriorClassifier/#attributes","title":"Attributes","text":"
    • counts (collections.Counter)

      Class counts.

    • n (int)

      Total number of seen instances.

    "},{"location":"api/dummy/PriorClassifier/#examples","title":"Examples","text":"

    Taken from example 2.1 from this page.

    from river import dummy\n\nsentences = [\n    ('glad happy glad', '+'),\n    ('glad glad joyful', '+'),\n    ('glad pleasant', '+'),\n    ('miserable sad glad', '\u2212')\n]\n\nmodel = dummy.PriorClassifier()\n\nfor sentence, label in sentences:\n    model = model.learn_one(sentence, label)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    '+'\n
    model.predict_proba_one(new_sentence)\n
    {'+': 0.75, '\u2212': 0.25}\n

    "},{"location":"api/dummy/PriorClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Krichevsky\u2013Trofimov estimator \u21a9

    "},{"location":"api/dummy/StatisticRegressor/","title":"StatisticRegressor","text":"

    Dummy regressor that uses a univariate statistic to make predictions.

    "},{"location":"api/dummy/StatisticRegressor/#parameters","title":"Parameters","text":"
    • statistic

      Type \u2192 stats.base.Univariate

    "},{"location":"api/dummy/StatisticRegressor/#examples","title":"Examples","text":"

    from pprint import pprint\nfrom river import dummy\nfrom river import stats\n\nsentences = [\n    ('glad happy glad', 3),\n    ('glad glad joyful', 3),\n    ('glad pleasant', 2),\n    ('miserable sad glad', -3)\n]\n\nmodel = dummy.StatisticRegressor(stats.Mean())\n\nfor sentence, score in sentences:\n    model = model.learn_one(sentence, score)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    1.25\n
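
    The prediction is simply the running mean of the targets seen so far: (3 + 3 + 2 - 3) / 4 = 1.25.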

    "},{"location":"api/dummy/StatisticRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/ensemble/ADWINBaggingClassifier/","title":"ADWINBaggingClassifier","text":"

    ADWIN Bagging classifier.

    ADWIN Bagging 1 is the online bagging method of Oza and Russell 2 with the addition of the ADWIN algorithm as a change detector. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.
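
    As a rough illustration, here is a minimal sketch of this replacement rule (a simplified, hypothetical helper, not river's actual internals), pairing each ensemble member with its own ADWIN detector:

    from river import drift, linear_model

    # One ADWIN change detector per ensemble member. When any detector flags a
    # drift, the member with the highest estimated error is swapped for a
    # fresh, untrained clone.
    models = [linear_model.LogisticRegression() for _ in range(3)]
    detectors = [drift.ADWIN() for _ in range(3)]

    def replace_worst(models, detectors):
        if any(d.drift_detected for d in detectors):
            worst = max(range(len(models)), key=lambda i: detectors[i].estimation)
            models[worst] = models[worst].clone()  # new (empty) classifier
            detectors[worst] = drift.ADWIN()
        return models, detectors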

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/ADWINBaggingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.ADWINBaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.65%\n

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    1. Albert Bifet, Geoff Holmes, Bernhard Pfahringer, Richard Kirkby, and Ricard Gavald\u00e0. \"New ensemble methods for evolving data streams.\" In 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2009.\u00a0\u21a9

    2. Oza, N., Russell, S. \"Online bagging and boosting.\" In: Artificial Intelligence and Statistics 2001, pp. 105\u2013112. Morgan Kaufmann, 2001.\u00a0\u21a9

    "},{"location":"api/ensemble/ADWINBoostingClassifier/","title":"ADWINBoostingClassifier","text":"

    ADWIN Boosting classifier.

    ADWIN Boosting 1 is the online boosting method of Oza and Russell 2 with the addition of the ADWIN algorithm as a change detector. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/ADWINBoostingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\nmodel = ensemble.ADWINBoostingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.61%\n

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Albert Bifet, Geoff Holmes, Bernhard Pfahringer, Richard Kirkby, and Ricard Gavald\u00e0. \"New ensemble methods for evolving data streams.\" In 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2009.\u00a0\u21a9

    2. Oza, N., Russell, S. \"Online bagging and boosting.\" In: Artificial Intelligence and Statistics 2001, pp. 105\u2013112. Morgan Kaufmann, 2001.\u00a0\u21a9

    "},{"location":"api/ensemble/AdaBoostClassifier/","title":"AdaBoostClassifier","text":"

    Boosting for classification.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution with parameter lambda. The lambda parameter is updated as the weak learners successively fit the same observation.
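
    To make the weighting scheme concrete, here is a hedged sketch of an Oza-style update loop (the correct/wrong counters are hypothetical names, not river's internals): lambda grows whenever a member misclassifies the sample, so subsequent members focus on the harder cases.

    import math
    import random

    def poisson(lam, rng=random):
        # Knuth's algorithm for sampling k ~ Poisson(lam).
        threshold, k, p = math.exp(-lam), 0, 1.0
        while True:
            p *= rng.random()
            if p <= threshold:
                return k
            k += 1

    def boost_one(models, x, y):
        lam = 1.0
        for m in models:
            for _ in range(poisson(lam)):  # train k times on this sample
                m.learn_one(x, y)
            if m.predict_one(x) == y:
                m.correct += lam
                lam *= (m.correct + m.wrong) / (2 * m.correct)
            else:
                m.wrong += lam
                lam *= (m.correct + m.wrong) / (2 * m.wrong)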

    "},{"location":"api/ensemble/AdaBoostClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/AdaBoostClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/AdaBoostClassifier/#examples","title":"Examples","text":"

    In the following example three tree classifiers are boosted together. The performance is slightly better than when using a single tree.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Phishing()\n\nmetric = metrics.LogLoss()\n\nmodel = ensemble.AdaBoostClassifier(\n    model=(\n        tree.HoeffdingTreeClassifier(\n            split_criterion='gini',\n            delta=1e-5,\n            grace_period=2000\n        )\n    ),\n    n_models=5,\n    seed=42\n)\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.370805\n

    print(model)\n
    AdaBoostClassifier(HoeffdingTreeClassifier)\n

    "},{"location":"api/ensemble/AdaBoostClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE international conference on systems, man and cybernetics (Vol. 3, pp. 2340-2345). IEEE. \u21a9

    "},{"location":"api/ensemble/BOLEClassifier/","title":"BOLEClassifier","text":"

    Boosting Online Learning Ensemble (BOLE).

    A modified version of the Oza Online Boosting Algorithm 1. For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution with parameter lambda. The first model to be trained is the one with the worst correct_weight / (correct_weight + wrong_weight) ratio. The worst models not yet trained receive lambda values for training from the models that incorrectly classified an instance, and the best models not yet trained receive lambda values for training from the models that correctly classified an instance. For more details, see 2.

    "},{"location":"api/ensemble/BOLEClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    • error_bound

      Default \u2192 0.5

      Error bound percentage for allowing models to vote.

    "},{"location":"api/ensemble/BOLEClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BOLEClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import drift\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Elec2().take(3000)\n\nmodel = ensemble.BOLEClassifier(\n    model=drift.DriftRetrainingClassifier(\n        model=tree.HoeffdingTreeClassifier(),\n        drift_detector=drift.binary.DDM()\n    ),\n    n_models=10,\n    seed=42\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 93.63%\n

    "},{"location":"api/ensemble/BOLEClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE international conference on systems, man and cybernetics (Vol. 3, pp. 2340-2345). IEEE. \u21a9

    2. R. S. M. d. Barros, S. Garrido T. de Carvalho Santos and P. M. Gon\u00e7alves J\u00fanior, \"A Boosting-like Online Learning Ensemble,\" 2016 International Joint Conference on Neural Networks (IJCNN), 2016, pp. 1871-1878, doi: 10.1109/IJCNN.2016.7727427.\u00a0\u21a9

    "},{"location":"api/ensemble/BaggingClassifier/","title":"BaggingClassifier","text":"

    Online bootstrap aggregation for classification.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution with parameter 1. k thus has a 36% chance of being equal to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being equal to 3, a 1% chance of being equal to 4, etc. You can run scipy.stats.poisson(1).pmf(k) to obtain more detailed values.
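
    These percentages are easy to check directly (a standalone snippet, unrelated to river itself):

    from scipy import stats

    # P(k) for a Poisson distribution with lambda=1, matching the figures above.
    for k in range(5):
        print(k, round(stats.poisson(1).pmf(k), 3))

    0 0.368
    1 0.368
    2 0.184
    3 0.061
    4 0.015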

    "},{"location":"api/ensemble/BaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/BaggingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BaggingClassifier/#examples","title":"Examples","text":"

    In the following example three logistic regressions are bagged together. The performance is slightly better than when using a single logistic regression.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.BaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.65%\n

    print(model)\n
    BaggingClassifier(StandardScaler | LogisticRegression)\n

    "},{"location":"api/ensemble/BaggingClassifier/#methods","title":"Methods","text":"learn_one predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE international conference on systems, man and cybernetics (Vol. 3, pp. 2340-2345). IEEE. \u21a9

    "},{"location":"api/ensemble/BaggingRegressor/","title":"BaggingRegressor","text":"

    Online bootstrap aggregation for regression.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution with parameter 1. k thus has a 36% chance of being equal to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being equal to 3, a 1% chance of being equal to 4, etc. You can run scipy.stats.poisson(1).pmf(k) for more detailed values.

    "},{"location":"api/ensemble/BaggingRegressor/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor

      The regressor to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/BaggingRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BaggingRegressor/#examples","title":"Examples","text":"

    In the following example three linear regressions are bagged together. The performance is slightly better than when using a single linear regression.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = preprocessing.StandardScaler()\nmodel |= ensemble.BaggingRegressor(\n    model=linear_model.LinearRegression(intercept_lr=0.1),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.677586\n

    "},{"location":"api/ensemble/BaggingRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Averages the predictions of each regressor.

    Parameters

    • x
    • kwargs

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE international conference on systems, man and cybernetics (Vol. 3, pp. 2340-2345). IEEE. \u21a9

    "},{"location":"api/ensemble/EWARegressor/","title":"EWARegressor","text":"

    Exponentially Weighted Average regressor.

    "},{"location":"api/ensemble/EWARegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Regressor]

      The regressors to hedge.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function that has to be minimized. Defaults to optim.losses.Squared.

    • learning_rate

      Default \u2192 0.5

      The learning rate by which the model weights are multiplied at each iteration (a sketch of the update follows this parameter list).
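
    A minimal sketch of the update, assuming the standard Hedge-style rule in which each model's weight is multiplied by exp(-learning_rate * loss) and the weights are then renormalised (an assumption for illustration, not river's documented internals):

    import math

    def ewa_update(weights, losses, learning_rate=0.5):
        weights = [w * math.exp(-learning_rate * loss) for w, loss in zip(weights, losses)]
        total = sum(weights)
        return [w / total for w in weights]

    # Three models start out equal; the lowest-loss model gains mass.
    print(ewa_update([1 / 3, 1 / 3, 1 / 3], [0.1, 0.5, 2.0]))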

    "},{"location":"api/ensemble/EWARegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/EWARegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\nfrom river import stream\n\noptimizers = [\n    optim.SGD(0.01),\n    optim.RMSProp(),\n    optim.AdaGrad()\n]\n\nfor optimizer in optimizers:\n\n    dataset = datasets.TrumpApproval()\n    metric = metrics.MAE()\n    model = (\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(\n            optimizer=optimizer,\n            intercept_lr=.1\n        )\n    )\n\n    print(optimizer, evaluate.progressive_val_score(dataset, model, metric))\n
    SGD MAE: 0.558735\nRMSProp MAE: 0.522449\nAdaGrad MAE: 0.477289\n

    dataset = datasets.TrumpApproval()\nmetric = metrics.MAE()\nhedge = (\n    preprocessing.StandardScaler() |\n    ensemble.EWARegressor(\n        [\n            linear_model.LinearRegression(optimizer=o, intercept_lr=.1)\n            for o in optimizers\n        ],\n        learning_rate=0.005\n    )\n)\n\nevaluate.progressive_val_score(dataset, hedge, metric)\n
    MAE: 0.496298\n

    "},{"location":"api/ensemble/EWARegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    learn_predict_one predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Online Learning from Experts: Weighed Majority and Hedge \u21a9

    2. Wikipedia page on the multiplicative weight update method \u21a9

    3. Kivinen, J. and Warmuth, M.K., 1997. Exponentiated gradient versus gradient descent for linear predictors. information and computation, 132(1), pp.1-63. \u21a9

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/","title":"LeveragingBaggingClassifier","text":"

    Leveraging Bagging ensemble classifier.

    Leveraging Bagging 1 is an improvement over the Oza Bagging algorithm. The bagging performance is leveraged by increasing the re-sampling, using a Poisson distribution to simulate the re-sampling process. To increase re-sampling, a higher value of the Poisson distribution's w parameter (the average number of events) is used, 6 by default, which increases the diversity of the input space by attributing a different range of weights to the data samples.
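
    To see how much more aggressive this re-sampling is, one can compare the two distributions (a standalone snippet, unrelated to river):

    from scipy import stats

    # Poisson(1) (Oza bagging) concentrates k around 0 and 1, whereas
    # Poisson(6) (leveraging bagging) spreads the weights over a wider range.
    for lam in (1, 6):
        print(lam, [round(stats.poisson(lam).pmf(k), 2) for k in range(10)])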

    To deal with concept drift, Leveraging Bagging uses the ADWIN algorithm to monitor the performance of each member of the ensemble. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Type \u2192 int

      Default \u2192 10

      The number of models in the ensemble.

    • w

      Type \u2192 float

      Default \u2192 6

      Indicates the average number of events. This is the lambda parameter of the Poisson distribution used to compute the re-sampling weight.

    • adwin_delta

      Type \u2192 float

      Default \u2192 0.002

      The delta parameter for the ADWIN change detector.

    • bagging_method

      Type \u2192 str

      Default \u2192 bag

      The bagging method to use. Can be one of the following:
      • 'bag' - Leveraging Bagging using ADWIN.
      • 'me' - Assigns \(weight=1\) if the sample is misclassified, otherwise \(weight=error/(1-error)\).
      • 'half' - Use resampling without replacement for half of the instances.
      • 'wt' - Resample without taking out all instances.
      • 'subag' - Resampling without replacement.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#attributes","title":"Attributes","text":"
    • bagging_methods

      Valid bagging_method options.

    • models

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.LeveragingBaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.55%\n

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    "},{"location":"api/ensemble/SRPClassifier/","title":"SRPClassifier","text":"

    Streaming Random Patches ensemble classifier.

    The Streaming Random Patches (SRP) 1 ensemble method simulates bagging or random subspaces. The default algorithm uses both bagging and random subspaces, namely Random Patches. The default base estimator is a Hoeffding Tree, but other base estimators can be used (unlike random forest variations).
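
    As an informal illustration, a random patch can be thought of as a random subset of the features combined with Poisson re-sampling of the instances; a minimal sketch (hypothetical feature names, not river's internals):

    import random

    rng = random.Random(42)
    features = ['f0', 'f1', 'f2', 'f3', 'f4']
    subspace = rng.sample(features, k=3)  # random subspace of the features

    # Each ensemble member only ever sees its own subset of the features.
    x = {'f0': 1.0, 'f1': 2.0, 'f2': 3.0, 'f3': 4.0, 'f4': 5.0}
    x_patch = {name: value for name, value in x.items() if name in subspace}
    print(x_patch)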

    "},{"location":"api/ensemble/SRPClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Estimator | None

      Default \u2192 None

      The base estimator.

    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of members in the ensemble.

    • subspace_size

      Type \u2192 int | float | str

      Default \u2192 0.6

      Number of features per subset for each classifier, where M is the total number of features. A negative value means M - subspace_size. Only applies when using random subspaces or random patches.
      • If int, indicates the number of features to use. Valid range: [2, M].
      • If float, indicates the percentage of features to use. Valid range: (0., 1.].
      • 'sqrt' - sqrt(M)+1.
      • 'rmsqrt' - Residual from M-(sqrt(M)+1).

    • training_method

      Type \u2192 str

      Default \u2192 patches

      The training method to use.
      • 'subspaces' - Random subspaces.
      • 'resampling' - Resampling.
      • 'patches' - Random patches.

    • lam

      Type \u2192 int

      Default \u2192 6

      Lambda value for resampling.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detector.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detector.

    • disable_detector

      Type \u2192 str

      Default \u2192 off

      Option to disable drift detectors:
      • If 'off', detectors are enabled.
      • If 'drift', disables concept drift detection and the background learner.
      • If 'warning', disables the background learner, and ensemble members are reset if drift is detected.

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 False

      If True, disables weighted voting.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    • metric

      Type \u2192 ClassificationMetric | None

      Default \u2192 None

      The metric to track the members' performance within the ensemble. This implementation assumes that larger values are better when using weighted votes.

    "},{"location":"api/ensemble/SRPClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/SRPClassifier/#examples","title":"Examples","text":"

    from river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river.datasets import synth\nfrom river import tree\n\ndataset = synth.ConceptDriftStream(\n    seed=42,\n    position=500,\n    width=50\n).take(1000)\n\nbase_model = tree.HoeffdingTreeClassifier(\n    grace_period=50, delta=0.01,\n    nominal_attributes=['age', 'car', 'zipcode']\n)\nmodel = ensemble.SRPClassifier(\n    model=base_model, n_models=3, seed=42,\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 72.77%\n

    "},{"location":"api/ensemble/SRPClassifier/#methods","title":"Methods","text":"learn_one predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    reset"},{"location":"api/ensemble/SRPClassifier/#notes","title":"Notes","text":"

    This implementation uses n_models=10 as default given the impact on processing time. The optimal number of models depends on the data and resources available.

    1. Heitor Murilo Gomes, Jesse Read, Albert Bifet. Streaming Random Patches for Evolving Data Stream Classification. IEEE International Conference on Data Mining (ICDM), 2019.\u00a0\u21a9

    "},{"location":"api/ensemble/SRPRegressor/","title":"SRPRegressor","text":"

    Streaming Random Patches ensemble regressor.

    The Streaming Random Patches 1 ensemble method for regression trains each base learner on a subset of features and instances from the original data, namely a random patch. This strategy to enforce diverse base models is similar to the one in random forests, yet it is not restricted to using decision trees as the base learner.

    This method is an adaptation of 2 for regression.

    "},{"location":"api/ensemble/SRPRegressor/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The base estimator.

    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of members in the ensemble.

    • subspace_size

      Type \u2192 int | float | str

      Default \u2192 0.6

      Number of features per subset for each classifier, where M is the total number of features. A negative value means M - subspace_size. Only applies when using random subspaces or random patches.
      • If int, indicates the number of features to use. Valid range: [2, M].
      • If float, indicates the percentage of features to use. Valid range: (0., 1.].
      • 'sqrt' - sqrt(M)+1.
      • 'rmsqrt' - Residual from M-(sqrt(M)+1).

    • training_method

      Type \u2192 str

      Default \u2192 patches

      The training method to use.
      • 'subspaces' - Random subspaces.
      • 'resampling' - Resampling.
      • 'patches' - Random patches.

    • lam

      Type \u2192 int

      Default \u2192 6

      Lambda value for bagging.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detector.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detector.

    • disable_detector

      Type \u2192 str

      Default \u2192 off

      Option to disable drift detectors:
      • If 'off', detectors are enabled.
      • If 'drift', disables concept drift detection and the background learner.
      • If 'warning', disables the background learner, and ensemble members are reset if drift is detected.

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 True

      If True, disables weighted voting.

    • drift_detection_criteria

      Type \u2192 str

      Default \u2192 error

      The criterion used to track drifts.
      • 'error' - absolute error.
      • 'prediction' - predicted target values.

    • aggregation_method

      Type \u2192 str

      Default \u2192 mean

      The method used to aggregate predictions in the ensemble.
      • 'mean'
      • 'median'

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    • metric

      Type \u2192 RegressionMetric | None

      Default \u2192 None

      The metric to track the members' performance within the ensemble.

    "},{"location":"api/ensemble/SRPRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/SRPRegressor/#examples","title":"Examples","text":"

    from river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river.datasets import synth\nfrom river import tree\n\ndataset = synth.FriedmanDrift(\n    drift_type='gsg',\n    position=(350, 750),\n    transition_window=200,\n    seed=42\n).take(1000)\n\nbase_model = tree.HoeffdingTreeRegressor(grace_period=50)\nmodel = ensemble.SRPRegressor(\n    model=base_model,\n    training_method=\"patches\",\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.R2()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    R2: 0.571117\n

    "},{"location":"api/ensemble/SRPRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    reset"},{"location":"api/ensemble/SRPRegressor/#notes","title":"Notes","text":"

    This implementation uses n_models=10 as default given the impact on processing time. The optimal number of models depends on the data and resources available.

    1. Heitor Gomes, Jacob Montiel, Saulo Martiello Mastelini, Bernhard Pfahringer, and Albert Bifet. On Ensemble Techniques for Data Stream Regression. IJCNN'20. International Joint Conference on Neural Networks. 2020.\u00a0\u21a9

    2. Heitor Murilo Gomes, Jesse Read, Albert Bifet. Streaming Random Patches for Evolving Data Stream Classification. IEEE International Conference on Data Mining (ICDM), 2019.\u00a0\u21a9

    "},{"location":"api/ensemble/StackingClassifier/","title":"StackingClassifier","text":"

    Stacking for binary classification.

    "},{"location":"api/ensemble/StackingClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Classifier]

    • meta_classifier

      Type \u2192 base.Classifier

    • include_features

      Default \u2192 True

      Indicates whether or not the original features should be provided to the meta-model along with the predictions from each model (see the sketch right after this parameter list).
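
    A sketch of how the meta-classifier's input could be assembled when include_features is True (hypothetical feature and model names, not river's internals):

    # The original features are passed through alongside each base model's
    # prediction, forming the meta-model's input.
    x = {'length': 5.1, 'width': 3.5}
    base_predictions = {'model_0': 0.92, 'model_1': 0.88, 'model_2': 0.95}
    meta_x = {**x, **base_predictions}
    print(meta_x)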

    "},{"location":"api/ensemble/StackingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/StackingClassifier/#examples","title":"Examples","text":"

    from river import compose\nfrom river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = compose.Pipeline(\n    ('scale', pp.StandardScaler()),\n    ('stack', ensemble.StackingClassifier(\n        [\n            lm.LogisticRegression(),\n            lm.PAClassifier(mode=1, C=0.01),\n            lm.PAClassifier(mode=2, C=0.01),\n        ],\n        meta_classifier=lm.LogisticRegression()\n    ))\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.14%\n

    "},{"location":"api/ensemble/StackingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. A Kaggler's Guide to Model Stacking in Practice \u21a9

    "},{"location":"api/ensemble/VotingClassifier/","title":"VotingClassifier","text":"

    Voting classifier.

    A classification is made by aggregating the predictions of each model in the ensemble. If use_probabilities is set to True, the probabilities for each class are summed up. If not, the probabilities are ignored and each prediction is weighted equally. In this case, it's important to use an odd number of classifiers: a random class will be picked if the number of classifiers is even.
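
    A small sketch of both aggregation modes (a hypothetical helper, not river's internals):

    from collections import Counter

    def vote(predictions=None, probabilities=None):
        if probabilities is not None:  # use_probabilities=True: sum class probabilities
            totals = Counter()
            for dist in probabilities:
                totals.update(dist)
            return max(totals, key=totals.get)
        return Counter(predictions).most_common(1)[0][0]  # plain majority vote

    print(vote(predictions=['spam', 'ham', 'spam']))
    print(vote(probabilities=[{'spam': .4, 'ham': .6}, {'spam': .9, 'ham': .1}]))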

    "},{"location":"api/ensemble/VotingClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Classifier]

      The classifiers.

    • use_probabilities

      Default \u2192 True

      Whether or not to weight each prediction with its associated probability.

    "},{"location":"api/ensemble/VotingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/VotingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import naive_bayes\nfrom river import preprocessing\nfrom river import tree\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    ensemble.VotingClassifier([\n        linear_model.LogisticRegression(),\n        tree.HoeffdingTreeClassifier(),\n        naive_bayes.GaussianNB()\n    ])\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.94%\n

    "},{"location":"api/ensemble/VotingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/evaluate/BinaryClassificationTrack/","title":"BinaryClassificationTrack","text":"

    This track evaluates a model's performance on binary classification tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/BinaryClassificationTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/MultiClassClassificationTrack/","title":"MultiClassClassificationTrack","text":"

    This track evaluates a model's performance on multi-class classification tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/MultiClassClassificationTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/RegressionTrack/","title":"RegressionTrack","text":"

    This track evaluates a model's performance on regression tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/RegressionTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/Track/","title":"Track","text":"

    A track evaluates a model's performance.

    The following metrics are recorded:

    • Time, which should be interpreted with care, since it depends on the architecture and on local resources. Comparison via FLOPS should be preferred.

    • The model's memory footprint.

    • The model's predictive performance on the track's dataset.

    "},{"location":"api/evaluate/Track/#parameters","title":"Parameters","text":"
    • name

      Type \u2192 str

      The name of the track.

    • datasets

      The datasets that compose the track.

    • metric

      The metric(s) used to track performance.

    "},{"location":"api/evaluate/Track/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/iter-progressive-val-score/","title":"iter_progressive_val_score","text":"

    Evaluates the performance of a model on a streaming dataset and yields results.

    This does exactly the same as evaluate.progressive_val_score. The only difference is that this function returns an iterator, yielding results at every step. This can be useful if you want to have control over what you do with the results. For instance, you might want to plot the results.

    "},{"location":"api/evaluate/iter-progressive-val-score/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      The stream of observations against which the model will be evaluated.

    • model

      The model to evaluate.

    • metric

      Type \u2192 metrics.base.Metric

      The metric used to evaluate the model's predictions.

    • moment

      Type \u2192 str | typing.Callable | None

      Default \u2192 None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

      Default \u2192 None

      The amount to wait before revealing the target associated with each observation to the model. This value is expected to be able to sum with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation.

    • step

      Default \u2192 1

      Iteration number at which to yield results. This only takes into account the predictions, and not the training steps.

    • measure_time

      Default \u2192 False

      Whether or not to measure the elapsed time.

    • measure_memory

      Default \u2192 False

      Whether or not to measure the memory usage of the model.

    "},{"location":"api/evaluate/iter-progressive-val-score/#examples","title":"Examples","text":"

    Take the following model:

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    We can evaluate it on the Phishing dataset as so:

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nsteps = evaluate.iter_progressive_val_score(\n    model=model,\n    dataset=datasets.Phishing(),\n    metric=metrics.ROCAUC(),\n    step=200\n)\n\nfor step in steps:\n    print(step)\n
    {'ROCAUC': ROCAUC: 90.20%, 'Step': 200}\n{'ROCAUC': ROCAUC: 92.25%, 'Step': 400}\n{'ROCAUC': ROCAUC: 93.23%, 'Step': 600}\n{'ROCAUC': ROCAUC: 94.05%, 'Step': 800}\n{'ROCAUC': ROCAUC: 94.79%, 'Step': 1000}\n{'ROCAUC': ROCAUC: 95.07%, 'Step': 1200}\n{'ROCAUC': ROCAUC: 95.07%, 'Step': 1250}\n

    1. Beating the Hold-Out: Bounds for K-fold and Progressive Cross-Validation \u21a9

    2. Grzenda, M., Gomes, H.M. and Bifet, A., 2019. Delayed labelling evaluation for data streams. Data Mining and Knowledge Discovery, pp.1-30 \u21a9

    "},{"location":"api/evaluate/progressive-val-score/","title":"progressive_val_score","text":"

    Evaluates the performance of a model on a streaming dataset.

    This method is the canonical way to evaluate a model's performance. When used correctly, it allows you to exactly assess how a model would have performed in a production scenario.

    dataset is converted into a stream of questions and answers. At each step, the model is either asked to predict an observation, or is updated. The target is only revealed to the model after a certain amount of time, which is determined by the delay parameter. Note that under the hood this uses the stream.simulate_qa function to go through the data in arrival order.

    By default, there is no delay, which means that the samples are processed one after the other. When there is no delay, this function essentially performs progressive validation. When there is a delay, then we refer to it as delayed progressive validation.

    It is recommended to use this method when you want to determine a model's performance on a dataset. In particular, it is advised to use the delay parameter in order to get a reliable assessment. Indeed, in a production scenario, it is often the case that ground truths are made available after a certain amount of time. By using this method, you can reproduce this scenario and therefore truthfully assess what would have been the performance of a model on a given dataset.

    "},{"location":"api/evaluate/progressive-val-score/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      The stream of observations against which the model will be evaluated.

    • model

      The model to evaluate.

    • metric

      Type \u2192 metrics.base.Metric

      The metric used to evaluate the model's predictions.

    • moment

      Type \u2192 str | typing.Callable | None

      Default \u2192 None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

      Default \u2192 None

      The amount to wait before revealing the target associated with each observation to the model. This value is expected to be able to sum with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation.

    • print_every

      Default \u2192 0

      Iteration number at which to print the current metric. This only takes into account the predictions, and not the training steps.

    • show_time

      Default \u2192 False

      Whether or not to display the elapsed time.

    • show_memory

      Default \u2192 False

      Whether or not to display the memory usage of the model.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/evaluate/progressive-val-score/#examples","title":"Examples","text":"

    Take the following model:

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    We can evaluate it on the Phishing dataset as so:

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    model=model,\n    dataset=datasets.Phishing(),\n    metric=metrics.ROCAUC(),\n    print_every=200\n)\n
    [200] ROCAUC: 90.20%\n[400] ROCAUC: 92.25%\n[600] ROCAUC: 93.23%\n[800] ROCAUC: 94.05%\n[1,000] ROCAUC: 94.79%\n[1,200] ROCAUC: 95.07%\n[1,250] ROCAUC: 95.07%\nROCAUC: 95.07%\n

    We haven't specified a delay, therefore this is strictly equivalent to the following piece of code:

    model = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n\nmetric = metrics.ROCAUC()\n\nfor x, y in datasets.Phishing():\n    y_pred = model.predict_proba_one(x)\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 95.07%\n

    When print_every is specified, the current state is printed at regular intervals. Under the hood, Python's print method is being used. You can pass extra keyword arguments to modify its behavior. For instance, you may use the file argument if you want to log the progress to a file of your choice.

    with open('progress.log', 'w') as f:\n    metric = evaluate.progressive_val_score(\n        model=model,\n        dataset=datasets.Phishing(),\n        metric=metrics.ROCAUC(),\n        print_every=200,\n        file=f\n    )\n\nwith open('progress.log') as f:\n    for line in f.read().splitlines():\n        print(line)\n
    [200] ROCAUC: 94.00%\n[400] ROCAUC: 94.70%\n[600] ROCAUC: 95.17%\n[800] ROCAUC: 95.42%\n[1,000] ROCAUC: 95.82%\n[1,200] ROCAUC: 96.00%\n[1,250] ROCAUC: 96.04%\n

    Note that the performance is slightly better than above because we haven't used a fresh copy of the model. Instead, we've reused the existing model which has already done a full pass on the data.

    import os; os.remove('progress.log')\n
    1. Beating the Hold-Out: Bounds for K-fold and Progressive Cross-Validation \u21a9

    2. Grzenda, M., Gomes, H.M. and Bifet, A., 2019. Delayed labelling evaluation for data streams. Data Mining and Knowledge Discovery, pp.1-30 \u21a9

    "},{"location":"api/facto/FFMClassifier/","title":"FFMClassifier","text":"

    Field-aware Factorization Machine for binary classification.

    The model equation is defined by:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_j} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_{j, f_{j'}}\\) is the latent vector corresponding to \\(j\\) feature for \\(f_{j'}\\) field, and \\(\\mathbf{v}_{j', f_j}\\) is the latent vector corresponding to \\(j'\\) feature for \\(f_j\\) field.

    For better efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].
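
    For instance, the field-inference rule works out as follows for the feature names used in the example below:

    # Everything before the first underscore acts as the field name.
    for feature in ['user_Alice', 'item_Superman', 'time']:
        print(feature, '->', feature.split('_')[0])

    user_Alice -> user
    item_Superman -> item
    time -> time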

    "},{"location":"api/facto/FFMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, True),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, True),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, True),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, True),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, False)\n)\n\nmodel = facto.FFMClassifier(\n    n_factors=10,\n    intercept=.5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    True\n

    "},{"location":"api/facto/FFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Juan, Y., Zhuang, Y., Chin, W.S. and Lin, C.J., 2016, September. Field-aware factorization machines for CTR prediction. In Proceedings of the 10th ACM Conference on Recommender Systems (pp. 43-50). \u21a9

    "},{"location":"api/facto/FFMRegressor/","title":"FFMRegressor","text":"

    Field-aware Factorization Machine for regression.

    The model equation is defined by:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_j} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_{j, f_{j'}}\\) is the latent vector corresponding to \\(j\\) feature for \\(f_{j'}\\) field, and \\(\\mathbf{v}_{j', f_j}\\) is the latent vector corresponding to \\(j'\\) feature for \\(f_j\\) field.

    For better efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FFMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n)\n\nmodel = facto.FFMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    5.319945\n

    report = model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n\nprint(report)\n
    Name                                       Value      Weight     Contribution\n                               Intercept    1.00000    5.23501        5.23501\n                                user_Bob    1.00000    0.11438        0.11438\n                                    time    0.14000    0.03186        0.00446\n    item_Harry Potter(time) - time(item)    0.14000    0.03153        0.00441\n             user_Bob(time) - time(user)    0.14000    0.02864        0.00401\n                       item_Harry Potter    1.00000    0.00000        0.00000\nuser_Bob(item) - item_Harry Potter(user)    1.00000   -0.04232       -0.04232\n
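
    In this table, a row such as user_Bob(time) - time(user) denotes a field-aware interaction: the latent vector that user_Bob holds for the time field is combined, via an inner product, with the latent vector that time holds for the user field, and the result is multiplied by both feature values.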

    "},{"location":"api/facto/FFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Juan, Y., Zhuang, Y., Chin, W.S. and Lin, C.J., 2016, September. Field-aware factorization machines for CTR prediction. In Proceedings of the 10th ACM Conference on Recommender Systems (pp. 43-50). \u21a9

    "},{"location":"api/facto/FMClassifier/","title":"FMClassifier","text":"

    Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are \\(j\\) and \\(j'\\) latent vectors, respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.
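
    As a rough sketch of what this implies (an illustration of the behavior, not the library's internal code), a sample such as {'user': 'Alice', 'time': .12} is handled as if it were {'user_Alice': 1, 'time': 0.12}:

    x = {'user': 'Alice', 'time': .12}\n{(f'{k}_{v}' if isinstance(v, str) else k): (1 if isinstance(v, str) else v) for k, v in x.items()}\n
    {'user_Alice': 1, 'time': 0.12}\n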

    "},{"location":"api/facto/FMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, True),\n    ({'user': 'Alice', 'item': 'Terminator'}, True),\n    ({'user': 'Alice', 'item': 'Star Wars'}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, True),\n    ({'user': 'Bob', 'item': 'Superman'}, True),\n    ({'user': 'Bob', 'item': 'Terminator'}, True),\n    ({'user': 'Bob', 'item': 'Star Wars'}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, False)\n)\n\nmodel = facto.FMClassifier(\n    n_factors=10,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    True\n

    "},{"location":"api/facto/FMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    2. Rendle, S., 2012, May. Factorization Machines with libFM. In ACM Transactions on Intelligent Systems and Technology 3, 3, Article 57, 22 pages. \u21a9

    "},{"location":"api/facto/FMRegressor/","title":"FMRegressor","text":"

    Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are \\(j\\) and \\(j'\\) latent vectors, respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.

    "},{"location":"api/facto/FMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = facto.FMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    5.236504\n

    report = model.debug_one({'Bob': 1, 'Harry Potter': 1})\n\nprint(report)\n
    Name                 Value      Weight     Contribution\n         Intercept    1.00000    5.23426        5.23426\nBob - Harry Potter    1.00000    0.00224        0.00224\n      Harry Potter    1.00000    0.00000        0.00000\n               Bob    1.00000    0.00000        0.00000\n

    "},{"location":"api/facto/FMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    2. Rendle, S., 2012, May. Factorization Machines with libFM. In ACM Transactions on Intelligent Systems and Technology 3, 3, Article 57, 22 pages. \u21a9

    "},{"location":"api/facto/FwFMClassifier/","title":"FwFMClassifier","text":"

    Field-weighted Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(f_j\\) and \\(f_{j'}\\) are \\(j\\) and \\(j'\\) fields, respectively, and \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are \\(j\\) and \\(j'\\) latent vectors, respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FwFMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • int_weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the field pairs interaction weights.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FwFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    • interaction_weights

      The current interaction strengths of field pairs.

    "},{"location":"api/facto/FwFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, True),\n    ({'user': 'Alice', 'item': 'Terminator'}, True),\n    ({'user': 'Alice', 'item': 'Star Wars'}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, True),\n    ({'user': 'Bob', 'item': 'Superman'}, True),\n    ({'user': 'Bob', 'item': 'Terminator'}, True),\n    ({'user': 'Bob', 'item': 'Star Wars'}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, False)\n)\n\nmodel = facto.FwFMClassifier(\n    n_factors=10,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    True\n

    "},{"location":"api/facto/FwFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Junwei Pan, Jian Xu, Alfonso Lobos Ruiz, Wenliang Zhao, Shengjun Pan, Yu Sun, and Quan Lu, 2018, April. Field-weighted Factorization Machines for Click-Through Rate Prediction in Display Advertising. In Proceedings of the 2018 World Wide Web Conference on World Wide Web. International World Wide Web Conferences Steering Committee, (pp. 1349\u20131357). \u21a9

    "},{"location":"api/facto/FwFMRegressor/","title":"FwFMRegressor","text":"

    Field-weighted Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(f_j\\) and \\(f_{j'}\\) are \\(j\\) and \\(j'\\) fields, respectively, and \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are \\(j\\) and \\(j'\\) latent vectors, respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FwFMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • int_weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the field pairs interaction weights.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FwFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    • interaction_weights

      The current interaction strengths of field pairs.

    "},{"location":"api/facto/FwFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = facto.FwFMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    5.236501\n

    report = model.debug_one({'Bob': 1, 'Harry Potter': 1})\n\nprint(report)\n
    Name                                    Value      Weight     Contribution\n                            Intercept    1.00000    5.23426        5.23426\nBob(Harry Potter) - Harry Potter(Bob)    1.00000    0.00224        0.00224\n                         Harry Potter    1.00000    0.00000        0.00000\n                                  Bob    1.00000    0.00000        0.00000\n

    "},{"location":"api/facto/FwFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Junwei Pan, Jian Xu, Alfonso Lobos Ruiz, Wenliang Zhao, Shengjun Pan, Yu Sun, and Quan Lu, 2018, April. Field-weighted Factorization Machines for Click-Through Rate Prediction in Display Advertising. In Proceedings of the 2018 World Wide Web Conference on World Wide Web. International World Wide Web Conferences Steering Committee, (pp. 1349\u20131357). \u21a9

    "},{"location":"api/facto/HOFMClassifier/","title":"HOFMClassifier","text":"

    Higher-Order Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right)\\]
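
    Here, \\(d\\) is the polynomial degree (model order), \\(p\\) is the number of features, and \\(k_l\\) is the number of latent factors used for the order-\\(l\\) interaction terms.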

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.

    "},{"location":"api/facto/HOFMClassifier/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 3

      Polynomial degree or model order.

    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/HOFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/HOFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, True),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, True),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, True),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, True),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, False)\n)\n\nmodel = facto.HOFMClassifier(\n    degree=3,\n    n_factors=10,\n    intercept=.5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    True\n

    "},{"location":"api/facto/HOFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    "},{"location":"api/facto/HOFMRegressor/","title":"HOFMRegressor","text":"

    Higher-Order Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right)\\]

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.

    "},{"location":"api/facto/HOFMRegressor/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 3

      Polynomial degree or model order.

    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/HOFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/HOFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n)\n\nmodel = facto.HOFMRegressor(\n    degree=3,\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    5.311745\n

    report = model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n\nprint(report)\n
    Name                                  Value      Weight     Contribution\n                          Intercept    1.00000    5.23495        5.23495\n                           user_Bob    1.00000    0.11436        0.11436\n                               time    0.14000    0.03185        0.00446\n                    user_Bob - time    0.14000    0.00884        0.00124\nuser_Bob - item_Harry Potter - time    0.14000    0.00117        0.00016\n                  item_Harry Potter    1.00000    0.00000        0.00000\n           item_Harry Potter - time    0.14000   -0.00695       -0.00097\n       user_Bob - item_Harry Potter    1.00000   -0.04246       -0.04246\n

    "},{"location":"api/facto/HOFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    "},{"location":"api/feature-extraction/Agg/","title":"Agg","text":"

    Computes a streaming aggregate.

    This transformer makes it possible to compute an aggregate statistic, much like the groupby method from pandas, but on a streaming dataset. It makes use of the streaming statistics from the stats module.

    When learn_one is called, the running statistic (how) of the group (by) is updated with the value of on. Meanwhile, the output of transform_one is a single-element dictionary, where the key is the name of the aggregate and the value is the current value of the statistic for the relevant group. The key is automatically inferred from the parameters.

    Note that you can use a compose.TransformerUnion to extract many aggregate statistics in a concise manner.

    "},{"location":"api/feature-extraction/Agg/#parameters","title":"Parameters","text":"
    • on

      Type \u2192 str

      The feature on which to compute the aggregate statistic.

    • by

      Type \u2192 str | list[str] | None

      The feature by which to group the data. All the data is included in the aggregate if this is None.

    • how

      Type \u2192 stats.base.Univariate | utils.Rolling | utils.TimeRolling

      The statistic to compute.

    "},{"location":"api/feature-extraction/Agg/#attributes","title":"Attributes","text":"
    • state

      Return the current values for each group as a series.

    "},{"location":"api/feature-extraction/Agg/#examples","title":"Examples","text":"

    Consider the following dataset:

    X = [\n    {'country': 'France', 'place': 'Taco Bell', 'revenue': 42},\n    {'country': 'Sweden', 'place': 'Burger King', 'revenue': 16},\n    {'country': 'France', 'place': 'Burger King', 'revenue': 24},\n    {'country': 'Sweden', 'place': 'Taco Bell', 'revenue': 58},\n    {'country': 'Sweden', 'place': 'Burger King', 'revenue': 20},\n    {'country': 'France', 'place': 'Taco Bell', 'revenue': 50},\n    {'country': 'France', 'place': 'Burger King', 'revenue': 10},\n    {'country': 'Sweden', 'place': 'Taco Bell', 'revenue': 80}\n]\n

    As an example, we can calculate the average (how) revenue (on) for each place (by):

    from river import feature_extraction as fx\nfrom river import stats\n\nagg = fx.Agg(\n    on='revenue',\n    by='place',\n    how=stats.Mean()\n)\n\nfor x in X:\n    agg = agg.learn_one(x)\n    print(agg.transform_one(x))\n
    {'revenue_mean_by_place': 42.0}\n{'revenue_mean_by_place': 16.0}\n{'revenue_mean_by_place': 20.0}\n{'revenue_mean_by_place': 50.0}\n{'revenue_mean_by_place': 20.0}\n{'revenue_mean_by_place': 50.0}\n{'revenue_mean_by_place': 17.5}\n{'revenue_mean_by_place': 57.5}\n

    You can compute an aggregate over multiple keys by passing a list to the by argument. For instance, we can compute the maximum (how) revenue (on) per place as well as per country (by):

    agg = fx.Agg(\n    on='revenue',\n    by=['place', 'country'],\n    how=stats.Max()\n)\n\nfor x in X:\n    agg = agg.learn_one(x)\n    print(agg.transform_one(x))\n
    {'revenue_max_by_place_and_country': 42}\n{'revenue_max_by_place_and_country': 16}\n{'revenue_max_by_place_and_country': 24}\n{'revenue_max_by_place_and_country': 58}\n{'revenue_max_by_place_and_country': 20}\n{'revenue_max_by_place_and_country': 50}\n{'revenue_max_by_place_and_country': 24}\n{'revenue_max_by_place_and_country': 80}\n

    You can use a compose.TransformerUnion in order to calculate multiple aggregates in one go. The latter can be constructed by using the + operator:

    agg = (\n    fx.Agg(on='revenue', by='place', how=stats.Mean()) +\n    fx.Agg(on='revenue', by=['place', 'country'], how=stats.Max())\n)\n\nimport pprint\nfor x in X:\n    agg = agg.learn_one(x)\n    pprint.pprint(agg.transform_one(x))\n
    {'revenue_max_by_place_and_country': 42, 'revenue_mean_by_place': 42.0}\n{'revenue_max_by_place_and_country': 16, 'revenue_mean_by_place': 16.0}\n{'revenue_max_by_place_and_country': 24, 'revenue_mean_by_place': 20.0}\n{'revenue_max_by_place_and_country': 58, 'revenue_mean_by_place': 50.0}\n{'revenue_max_by_place_and_country': 20, 'revenue_mean_by_place': 20.0}\n{'revenue_max_by_place_and_country': 50, 'revenue_mean_by_place': 50.0}\n{'revenue_max_by_place_and_country': 24, 'revenue_mean_by_place': 17.5}\n{'revenue_max_by_place_and_country': 80, 'revenue_mean_by_place': 57.5}\n

    The state property returns a pandas.Series, which can be useful for visualizing the current state.

    agg[0].state\n
    Taco Bell      57.5\nBurger King    17.5\nName: revenue_mean_by_place, dtype: float64\n

    agg[1].state\n
    place        country\nTaco Bell    France     50\nBurger King  Sweden     20\n             France     24\nTaco Bell    Sweden     80\nName: revenue_max_by_place_and_country, dtype: int64\n

    This transformer can also be used in conjunction with utils.TimeRolling. The latter requires a t argument, which is a timestamp indicating when the current row was observed. For instance, we can calculate the average (how) value (on) for each group (by) over the last 7 days (t):

    import datetime as dt\nimport random\nimport string\nfrom river import utils\n\nagg = fx.Agg(\n    on=\"value\",\n    by=\"group\",\n    how=utils.TimeRolling(stats.Mean(), dt.timedelta(days=7))\n)\n\nfor day in range(366):\n    g = random.choice(string.ascii_lowercase)\n    x = {\n        \"group\": g,\n        \"value\": string.ascii_lowercase.index(g) + random.random(),\n    }\n    t = dt.datetime(2023, 1, 1) + dt.timedelta(days=day)\n    agg = agg.learn_one(x, t=t)\n\nlen(agg.state)\n
    26\n
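
    Similarly, utils.Rolling computes the statistic over the n most recent observations rather than over a time window. A minimal sketch, reusing the X dataset from above:

    agg = fx.Agg(\n    on='revenue',\n    by='place',\n    how=utils.Rolling(stats.Mean(), window_size=2)\n)\n\nfor x in X:\n    agg = agg.learn_one(x)\n

    The resulting feature's key is again inferred automatically from the parameters.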

    "},{"location":"api/feature-extraction/Agg/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'
    • t \u2014 defaults to None

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Streaming groupbys in pandas for big datasets \u21a9

    "},{"location":"api/feature-extraction/BagOfWords/","title":"BagOfWords","text":"

    Counts tokens in sentences.

    This transformer can be used to count tokens in a given piece of text. It takes care of normalizing the text before tokenizing it. In mini-batch settings, this transformer can convert a pandas Series of text into a sparse dataframe.

    Note that the parameters are identical to those of feature_extraction.TFIDF.

    "},{"location":"api/feature-extraction/BagOfWords/#parameters","title":"Parameters","text":"
    • on

      Type \u2192 str | None

      Default \u2192 None

      The name of the feature that contains the text to vectorize. If None, then each learn_one and transform_one will assume that each x that is provided is a str, and not a dict.

    • strip_accents

      Default \u2192 True

      Whether or not to strip accent characters.

    • lowercase

      Default \u2192 True

      Whether or not to convert all characters to lowercase.

    • preprocessor

      Type \u2192 typing.Callable | None

      Default \u2192 None

      An optional preprocessing function which overrides the strip_accents and lowercase steps, while preserving the tokenizing and n-grams generation steps.

    • stop_words

      Type \u2192 set[str] | None

      Default \u2192 None

      An optional set of tokens to remove.

    • tokenizer_pattern

      Default \u2192 (?u)\\b\\w[\\w\\-]+\\b

      The tokenization pattern which is used when no tokenizer function is passed. A single capture group may optionally be specified.

    • tokenizer

      Type \u2192 typing.Callable | None

      Default \u2192 None

      A function used to convert preprocessed text into a dict of tokens. By default, a regex formula that works well in most cases is used.

    • ngram_range

      Default \u2192 (1, 1)

      The lower and upper boundary of the range of n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ngram_range of (1, 1) means only unigrams, (1, 2) means unigrams and bigrams, and (2, 2) means only bigrams.

    "},{"location":"api/feature-extraction/BagOfWords/#examples","title":"Examples","text":"

    By default, BagOfWords will take as input a sentence, preprocess it, tokenize the preprocessed text, and then return a collections.Counter containing the number of occurrences of each token.

    from river import feature_extraction as fx\n\ncorpus = [\n    'This is the first document.',\n    'This document is the second document.',\n    'And this is the third one.',\n    'Is this the first document?',\n]\n\nbow = fx.BagOfWords()\n\nfor sentence in corpus:\n    print(bow.transform_one(sentence))\n
    {'this': 1, 'is': 1, 'the': 1, 'first': 1, 'document': 1}\n{'this': 1, 'document': 2, 'is': 1, 'the': 1, 'second': 1}\n{'and': 1, 'this': 1, 'is': 1, 'the': 1, 'third': 1, 'one': 1}\n{'is': 1, 'this': 1, 'the': 1, 'first': 1, 'document': 1}\n

    Note that learn_one does not have to be called because BagOfWords is stateless. You can call it but it won't do anything.

    In the above example, a string is passed to transform_one. You can also indicate which field to access if the string is stored in a dictionary:

    bow = fx.BagOfWords(on='sentence')\n\nfor sentence in corpus:\n    x = {'sentence': sentence}\n    print(bow.transform_one(x))\n
    {'this': 1, 'is': 1, 'the': 1, 'first': 1, 'document': 1}\n{'this': 1, 'document': 2, 'is': 1, 'the': 1, 'second': 1}\n{'and': 1, 'this': 1, 'is': 1, 'the': 1, 'third': 1, 'one': 1}\n{'is': 1, 'this': 1, 'the': 1, 'first': 1, 'document': 1}\n

    The ngram_range parameter can be used to extract n-grams (including unigrams):

    ngrammer = fx.BagOfWords(ngram_range=(1, 2))\n\nngrams = ngrammer.transform_one('I love the smell of napalm in the morning')\nfor ngram, count in ngrams.items():\n    print(ngram, count)\n
    love 1\nthe 2\nsmell 1\nof 1\nnapalm 1\nin 1\nmorning 1\n('love', 'the') 1\n('the', 'smell') 1\n('smell', 'of') 1\n('of', 'napalm') 1\n('napalm', 'in') 1\n('in', 'the') 1\n('the', 'morning') 1\n
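
    Common words can also be filtered out with the stop_words parameter (a small sketch; tokens are removed after lowercasing):

    bow = fx.BagOfWords(stop_words={'this', 'is', 'the'})\n\nprint(bow.transform_one('This is the first document.'))\n
    {'first': 1, 'document': 1}\n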

    BagOfWords can build a term-frequency pandas sparse dataframe with the transform_many method.

    import pandas as pd\nX = pd.Series(['Hello world', 'Hello River'], index = ['river', 'rocks'])\nbow = fx.BagOfWords()\nbow.transform_many(X=X)\n
           hello  world  river\nriver      1      1      0\nrocks      1      0      1\n

    "},{"location":"api/feature-extraction/BagOfWords/#methods","title":"Methods","text":"learn_many learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    process_text transform_many

    Transform a pandas Series of strings into a term-frequency pandas sparse dataframe.

    Parameters

    • X \u2014 'pd.Series'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/PolynomialExtender/","title":"PolynomialExtender","text":"

    Polynomial feature extender.

    Generate features consisting of all polynomial combinations of the features with degree less than or equal to the specified degree.

    Be aware that the number of output features scales polynomially in the number of input features and exponentially in the degree. High degrees can cause overfitting.
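
    Concretely, with \\(p\\) input features, maximum degree \\(d\\), include_bias=True and interaction_only=False, the number of generated features is \\(\\binom{p + d}{d}\\). The first example below matches this: \\(p = 2\\) and \\(d = 2\\) give \\(\\binom{4}{2} = 6\\) features.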

    "},{"location":"api/feature-extraction/PolynomialExtender/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 2

      The maximum degree of the polynomial features.

    • interaction_only

      Default \u2192 False

      If True then only combinations that include an element at most once will be computed.

    • include_bias

      Default \u2192 False

      Whether or not to include a dummy feature which is always equal to 1.

    • bias_name

      Default \u2192 bias

      Name to give to the bias feature.

    "},{"location":"api/feature-extraction/PolynomialExtender/#examples","title":"Examples","text":"

    from river import feature_extraction as fx\n\nX = [\n    {'x': 0, 'y': 1},\n    {'x': 2, 'y': 3},\n    {'x': 4, 'y': 5}\n]\n\npoly = fx.PolynomialExtender(degree=2, include_bias=True)\nfor x in X:\n    print(poly.transform_one(x))\n
    {'x': 0, 'y': 1, 'x*x': 0, 'x*y': 0, 'y*y': 1, 'bias': 1}\n{'x': 2, 'y': 3, 'x*x': 4, 'x*y': 6, 'y*y': 9, 'bias': 1}\n{'x': 4, 'y': 5, 'x*x': 16, 'x*y': 20, 'y*y': 25, 'bias': 1}\n

    X = [\n    {'x': 0, 'y': 1, 'z': 2},\n    {'x': 2, 'y': 3, 'z': 2},\n    {'x': 4, 'y': 5, 'z': 2}\n]\n\npoly = fx.PolynomialExtender(degree=3, interaction_only=True)\nfor x in X:\n    print(poly.transform_one(x))\n
    {'x': 0, 'y': 1, 'z': 2, 'x*y': 0, 'x*z': 0, 'y*z': 2, 'x*y*z': 0}\n{'x': 2, 'y': 3, 'z': 2, 'x*y': 6, 'x*z': 4, 'y*z': 6, 'x*y*z': 12}\n{'x': 4, 'y': 5, 'z': 2, 'x*y': 20, 'x*z': 8, 'y*z': 10, 'x*y*z': 40}\n

    Polynomial features are typically used with a linear model in order to capture interactions between features. This may be done by setting up a pipeline, like so:

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = (\n    fx.PolynomialExtender() |\n    pp.StandardScaler() |\n    lm.LogisticRegression()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.88%\n

    "},{"location":"api/feature-extraction/PolynomialExtender/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/RBFSampler/","title":"RBFSampler","text":"

    Extracts random features which approximate an RBF kernel.

    This is a powerful way to give non-linear capacity to linear classifiers. This method is also called \"random Fourier features\" in the literature.
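
    As a sketch of the underlying idea (following Rahimi and Recht; not necessarily this implementation's exact internals), each of the \\(D\\) random features has the form

    \\[z_i(x) = \\sqrt{2 / D} \\, \\cos(\\mathbf{w}_i \\cdot x + b_i), \\quad \\mathbf{w}_i \\sim \\mathcal{N}(0, 2 \\gamma I), \\quad b_i \\sim \\mathrm{Uniform}(0, 2 \\pi)\\]

    so that \\(z(x) \\cdot z(x') \\approx \\exp(-\\gamma \\lVert x - x' \\rVert^2)\\), i.e. the RBF kernel.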

    "},{"location":"api/feature-extraction/RBFSampler/#parameters","title":"Parameters","text":"
    • gamma

      Default \u2192 1.0

      RBF kernel parameter, as in exp(-gamma * x^2).

    • n_components

      Default \u2192 100

      Number of samples per original feature. Equals the dimensionality of the computed feature space.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number seed.

    "},{"location":"api/feature-extraction/RBFSampler/#examples","title":"Examples","text":"

    from river import feature_extraction as fx\nfrom river import linear_model as lm\nfrom river import optim\nfrom river import stream\n\nX = [[0, 0], [1, 1], [1, 0], [0, 1]]\nY = [0, 0, 1, 1]\n\nmodel = lm.LogisticRegression(optimizer=optim.SGD(.1))\n\nfor x, y in stream.iter_array(X, Y):\n    model = model.learn_one(x, y)\n    y_pred = model.predict_one(x)\n    print(y, int(y_pred))\n
    0 0\n0 0\n1 0\n1 1\n

    model = (\n    fx.RBFSampler(seed=3) |\n    lm.LogisticRegression(optimizer=optim.SGD(.1))\n)\n\nfor x, y in stream.iter_array(X, Y):\n    model = model.learn_one(x, y)\n    y_pred = model.predict_one(x)\n    print(y, int(y_pred))\n
    0 0\n0 0\n1 1\n1 1\n

    "},{"location":"api/feature-extraction/RBFSampler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    Returns

    dict: The transformed values.

    1. Rahimi, A. and Recht, B., 2008. Random features for large-scale kernel machines. In Advances in neural information processing systems (pp. 1177-1184 \u21a9

    "},{"location":"api/feature-extraction/TFIDF/","title":"TFIDF","text":"

    Computes TF-IDF values from sentences.

    The TF-IDF formula is the same as scikit-learn's. The only difference is that the document frequencies are determined online, whereas in a batch setting they can be determined by performing an initial pass over the data.
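
    For reference, assuming scikit-learn's smoothed defaults, the weight of a token \\(t\\) in a document \\(d\\) is

    \\[\\text{tf-idf}(t, d) = \\text{tf}(t, d) \\times \\left(1 + \\ln \\frac{1 + n}{1 + \\text{df}(t)}\\right)\\]

    where \\(n\\) is the number of documents seen so far and \\(\\text{df}(t)\\) is the number of those containing \\(t\\). Both counts are updated online, and the resulting vector is optionally L2-normalized (see the normalize parameter).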

    Note that the parameters are identical to those of feature_extraction.BagOfWords.

    "},{"location":"api/feature-extraction/TFIDF/#parameters","title":"Parameters","text":"
    • normalize

      Default \u2192 True

      Whether or not to normalize the TF-IDF values by their L2 norm.

    • on

      Type \u2192 str | None

      Default \u2192 None

      The name of the feature that contains the text to vectorize. If None, then the input is treated as a document instead of a set of features.

    • strip_accents

      Default \u2192 True

      Whether or not to strip accent characters.

    • lowercase

      Default \u2192 True

      Whether or not to convert all characters to lowercase.

    • preprocessor

      Type \u2192 typing.Callable | None

      Default \u2192 None

      An optional preprocessing function which overrides the strip_accents and lowercase steps, while preserving the tokenizing and n-grams generation steps.

    • tokenizer

      Type \u2192 typing.Callable | None

      Default \u2192 None

      A function used to convert preprocessed text into a dict of tokens. By default, a regex formula that works well in most cases is used.

    • ngram_range

      Default \u2192 (1, 1)

      The lower and upper boundary of the range of n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ngram_range of (1, 1) means only unigrams, (1, 2) means unigrams and bigrams, and (2, 2) means only bigrams. Only works if tokenizer is not set to False.

    "},{"location":"api/feature-extraction/TFIDF/#attributes","title":"Attributes","text":"
    • dfs (collections.defaultdict)

      Document counts.

    • n (int)

      Number of scanned documents.

    "},{"location":"api/feature-extraction/TFIDF/#examples","title":"Examples","text":"

    from river import feature_extraction\n\ntfidf = feature_extraction.TFIDF()\n\ncorpus = [\n    'This is the first document.',\n    'This document is the second document.',\n    'And this is the third one.',\n    'Is this the first document?',\n]\n\nfor sentence in corpus:\n    tfidf = tfidf.learn_one(sentence)\n    print(tfidf.transform_one(sentence))\n
    {'this': 0.447, 'is': 0.447, 'the': 0.447, 'first': 0.447, 'document': 0.447}\n{'this': 0.333, 'document': 0.667, 'is': 0.333, 'the': 0.333, 'second': 0.469}\n{'and': 0.497, 'this': 0.293, 'is': 0.293, 'the': 0.293, 'third': 0.497, 'one': 0.497}\n{'is': 0.384, 'this': 0.384, 'the': 0.384, 'first': 0.580, 'document': 0.469}\n

    In the above example, a string is passed to transform_one. You can also indicate which field to access if the string is stored in a dictionary:

    tfidf = feature_extraction.TFIDF(on='sentence')\n\nfor sentence in corpus:\n    x = {'sentence': sentence}\n    tfidf = tfidf.learn_one(x)\n    print(tfidf.transform_one(x))\n
    {'this': 0.447, 'is': 0.447, 'the': 0.447, 'first': 0.447, 'document': 0.447}\n{'this': 0.333, 'document': 0.667, 'is': 0.333, 'the': 0.333, 'second': 0.469}\n{'and': 0.497, 'this': 0.293, 'is': 0.293, 'the': 0.293, 'third': 0.497, 'one': 0.497}\n{'is': 0.384, 'this': 0.384, 'the': 0.384, 'first': 0.580, 'document': 0.469}\n

    "},{"location":"api/feature-extraction/TFIDF/#methods","title":"Methods","text":"learn_many learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    process_text transform_many

    Transform a pandas Series of strings into a term-frequency pandas sparse dataframe.

    Parameters

    • X \u2014 'pd.Series'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/TargetAgg/","title":"TargetAgg","text":"

    Computes a streaming aggregate of the target values.

    This transformer is identical to feature_extraction.Agg, except that it operates on the target rather than on a feature. At each step, the running statistic (how) of the target values in the group (by) is updated with the target. It is therefore a supervised transformer.

    "},{"location":"api/feature-extraction/TargetAgg/#parameters","title":"Parameters","text":"
    • by

      Type \u2192 str | list[str] | None

      The feature by which to group the target values. All the data is included in the aggregate if this is None.

    • how

      Type \u2192 stats.base.Univariate | utils.Rolling | utils.TimeRolling

      The statistic to compute.

    • target_name

      Default \u2192 y

      The target name which is used in the result.

    "},{"location":"api/feature-extraction/TargetAgg/#attributes","title":"Attributes","text":"
    • state

      Return the current values for each group as a series.

    • target_name

    "},{"location":"api/feature-extraction/TargetAgg/#examples","title":"Examples","text":"

    Consider the following dataset, where the second element of each pair is the target:

    dataset = [\n    ({'country': 'France', 'place': 'Taco Bell'}, 42),\n    ({'country': 'Sweden', 'place': 'Burger King'}, 16),\n    ({'country': 'France', 'place': 'Burger King'}, 24),\n    ({'country': 'Sweden', 'place': 'Taco Bell'}, 58),\n    ({'country': 'Sweden', 'place': 'Burger King'}, 20),\n    ({'country': 'France', 'place': 'Taco Bell'}, 50),\n    ({'country': 'France', 'place': 'Burger King'}, 10),\n    ({'country': 'Sweden', 'place': 'Taco Bell'}, 80)\n]\n

    As an example, let's perform a target encoding of the place feature. Instead of simply updating a running average, we use a stats.BayesianMean, which allows us to incorporate some prior knowledge. This makes subsequent models less prone to overfitting, since it dampens the influence of groups for which only a few samples have been seen.

    from river import feature_extraction\nfrom river import stats\n\nagg = feature_extraction.TargetAgg(\n    by='place',\n    how=stats.BayesianMean(\n        prior=3,\n        prior_weight=1\n    )\n)\n\nfor x, y in dataset:\n    print(agg.transform_one(x))\n    agg = agg.learn_one(x, y)\n
    {'y_bayes_mean_by_place': 3.0}\n{'y_bayes_mean_by_place': 3.0}\n{'y_bayes_mean_by_place': 9.5}\n{'y_bayes_mean_by_place': 22.5}\n{'y_bayes_mean_by_place': 14.333}\n{'y_bayes_mean_by_place': 34.333}\n{'y_bayes_mean_by_place': 15.75}\n{'y_bayes_mean_by_place': 38.25}\n
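
    These numbers can be checked by hand, assuming the usual Bayesian mean formula \((w \times prior + \sum y) / (w + n)\), where \(w\) is prior_weight and \(n\) is the number of targets seen in the group (this is consistent with the outputs above). For instance, after the first Taco Bell target of 42, the next Taco Bell prediction is \((1 \times 3 + 42) / (1 + 1) = 22.5\), which is the fourth value printed above.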

    Just like with feature_extraction.Agg, we can specify multiple features on which to group the data:

    agg = feature_extraction.TargetAgg(\n    by=['place', 'country'],\n    how=stats.BayesianMean(\n        prior=3,\n        prior_weight=1\n    )\n)\n\nfor x, y in dataset:\n    print(agg.transform_one(x))\n    agg = agg.learn_one(x, y)\n
    {'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 9.5}\n{'y_bayes_mean_by_place_and_country': 22.5}\n{'y_bayes_mean_by_place_and_country': 13.5}\n{'y_bayes_mean_by_place_and_country': 30.5}\n

    agg.state\n
    place        country\nTaco Bell    France     31.666667\nBurger King  Sweden     13.000000\n             France     12.333333\nTaco Bell    Sweden     47.000000\nName: y_bayes_mean_by_place_and_country, dtype: float64\n

    This transformer can also be used in conjunction with utils.TimeRolling. The latter requires a t argument, which is a timestamp that indicates when the current row was observed. For instance, we can calculate the average (how) target value for each group (by) over the last 7 days (t):

    import datetime as dt\nimport random\nimport string\nfrom river import utils\n\nagg = feature_extraction.TargetAgg(\n    by=\"group\",\n    how=utils.TimeRolling(stats.Mean(), dt.timedelta(days=7))\n)\n\nfor day in range(366):\n    g = random.choice(string.ascii_lowercase)\n    x = {\"group\": g}\n    y = string.ascii_lowercase.index(g) + random.random()\n    t = dt.datetime(2023, 1, 1) + dt.timedelta(days=day)\n    agg = agg.learn_one(x, y, t=t)\n
    "},{"location":"api/feature-extraction/TargetAgg/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'
    • t \u2014 defaults to None

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Streaming groupbys in pandas for big datasets

    "},{"location":"api/feature-selection/PoissonInclusion/","title":"PoissonInclusion","text":"

    Randomly selects features with an inclusion trial.

    When a new feature is encountered, it is selected with probability p. The number of times a feature needs to be seen before it is added to the model follows a geometric distribution with expected value 1 / p. This feature selection method is meant to be used when you have a very large number of sparse features.
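
    As a minimal sketch of this inclusion trial (an illustration of the idea described above, not the library's internal code): each previously unseen feature flips a Bernoulli(p) coin, and once included a feature stays included:

    import random\n\nrng = random.Random(42)\np = 0.1\nincluded = set()\n\ndef select(x):\n    # Run the inclusion trial for features that haven't been selected yet.\n    for name in x:\n        if name not in included and rng.random() < p:\n            included.add(name)\n    # Only let through the features selected so far.\n    return {name: v for name, v in x.items() if name in included}\n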

    "},{"location":"api/feature-selection/PoissonInclusion/#parameters","title":"Parameters","text":"
    • p

      Type \u2192 float

      Probability of including a feature the first time it is encountered.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed value used for reproducibility.

    "},{"location":"api/feature-selection/PoissonInclusion/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import feature_selection\nfrom river import stream\n\nselector = feature_selection.PoissonInclusion(p=0.1, seed=42)\n\ndataset = iter(datasets.TrumpApproval())\n\nfeature_names = next(dataset)[0].keys()\nn = 0\n\nwhile True:\n    x, y = next(dataset)\n    xt = selector.transform_one(x)\n    if xt.keys() == feature_names:\n        break\n    n += 1\n\nn\n
    12\n

    "},{"location":"api/feature-selection/PoissonInclusion/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to do something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. McMahan, H.B., Holt, G., Sculley, D., Young, M., Ebner, D., Grady, J., Nie, L., Phillips, T., Davydov, E., Golovin, D. and Chikkerur, S., 2013, August. Ad click prediction: a view from the trenches. In Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 1222-1230) \u21a9

    "},{"location":"api/feature-selection/SelectKBest/","title":"SelectKBest","text":"

    Removes all but the \\(k\\) highest scoring features.

    "},{"location":"api/feature-selection/SelectKBest/#parameters","title":"Parameters","text":"
    • similarity

      Type \u2192 stats.base.Bivariate

    • k

      Default \u2192 10

      The number of features to keep.

    "},{"location":"api/feature-selection/SelectKBest/#attributes","title":"Attributes","text":"
    • similarities (dict)

      The similarity instances used for each feature.

    • leaderboard (dict)

      The actual similarity measures.

    "},{"location":"api/feature-selection/SelectKBest/#examples","title":"Examples","text":"

    from pprint import pprint\nfrom river import feature_selection\nfrom river import stats\nfrom river import stream\nfrom sklearn import datasets\n\nX, y = datasets.make_regression(\n    n_samples=100,\n    n_features=10,\n    n_informative=2,\n    random_state=42\n)\n\nselector = feature_selection.SelectKBest(\n    similarity=stats.PearsonCorr(),\n    k=2\n)\n\nfor xi, yi, in stream.iter_array(X, y):\n    selector = selector.learn_one(xi, yi)\n\npprint(selector.leaderboard)\n
    Counter({9: 0.7898,\n        7: 0.5444,\n        8: 0.1062,\n        2: 0.0638,\n        4: 0.0538,\n        5: 0.0271,\n        1: -0.0312,\n        6: -0.0657,\n        3: -0.1501,\n        0: -0.1895})\n

    selector.transform_one(xi)\n
    {7: -1.2795, 9: -1.8408}\n

    "},{"location":"api/feature-selection/SelectKBest/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-selection/VarianceThreshold/","title":"VarianceThreshold","text":"

    Removes low-variance features.

    "},{"location":"api/feature-selection/VarianceThreshold/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 0

      Only features with a variance above the threshold will be kept.

    • min_samples

      Default \u2192 2

      The minimum number of samples required to perform selection.

    "},{"location":"api/feature-selection/VarianceThreshold/#attributes","title":"Attributes","text":"
    • variances (dict)

      The variance of each feature.

    "},{"location":"api/feature-selection/VarianceThreshold/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import stream\n\nX = [\n    [0, 2, 0, 3],\n    [0, 1, 4, 3],\n    [0, 1, 1, 3]\n]\n\nselector = feature_selection.VarianceThreshold()\n\nfor x, _ in stream.iter_array(X):\n    print(selector.learn_one(x).transform_one(x))\n
    {0: 0, 1: 2, 2: 0, 3: 3}\n{1: 1, 2: 4}\n{1: 1, 2: 1}\n

    "},{"location":"api/feature-selection/VarianceThreshold/#methods","title":"Methods","text":"check_feature learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to do something during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/forest/AMFClassifier/","title":"AMFClassifier","text":"

    Aggregated Mondrian Forest classifier for online learning.

    This implementation is truly online1, in the sense that a single pass is performed, and that predictions can be produced anytime.

    Each node in a tree predicts according to the distribution of the labels it contains. This distribution is regularized using a \"Jeffreys\" prior with parameter dirichlet. For each class with count labels in the node and n_samples samples in it, the prediction of a node is given by

    \\(\\frac{count + dirichlet}{n_{samples} + dirichlet \\times n_{classes}}\\).
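
    As a quick numerical illustration (values chosen arbitrarily): in a binary problem with dirichlet = 0.5, a node holding 10 samples, 3 of which belong to a given class, predicts \(\frac{3 + 0.5}{10 + 0.5 \times 2} = \frac{3.5}{11} \approx 0.318\) for that class.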

    The prediction for a sample is computed as the aggregated predictions of all the subtrees along the path leading to the leaf node containing the sample. The aggregation weights are exponential weights with learning rate step and log-loss when use_aggregation is True.

    This computation is performed exactly thanks to a context tree weighting algorithm. More details can be found in the paper cited in the references below.
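
    In exponential-weights form, a subtree \(T\) with cumulative log-loss \(L_T\) receives a weight proportional to \(\exp(-\eta L_T)\), where \(\eta\) is the step parameter; the context tree weighting algorithm computes the resulting aggregate over all subtrees exactly, without enumerating them.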

    The final predictions are the average class probabilities predicted by each of the n_estimators trees in the forest.

    "},{"location":"api/forest/AMFClassifier/#parameters","title":"Parameters","text":"
    • n_estimators

      Type \u2192 int

      Default \u2192 10

      The number of trees in the forest.

    • step

      Type \u2192 float

      Default \u2192 1.0

      Step-size for the aggregation weights. Default is 1 for classification with the log-loss, which is usually the best choice.

    • use_aggregation

      Type \u2192 bool

      Default \u2192 True

      Controls if aggregation is used in the trees. It is highly recommended to leave it as True.

    • dirichlet

      Type \u2192 float

      Default \u2192 0.5

      Regularization level of the class frequencies used for predictions in each node. A rule of thumb is to set this to 1 / n_classes, where n_classes is the expected number of classes which might appear. Default is dirichlet = 0.5, which works well for binary classification problems.

    • split_pure

      Type \u2192 bool

      Default \u2192 False

      Controls whether nodes that contain only samples of the same class should be split (\"pure\" nodes). Default is False, meaning pure nodes are not split, but True can sometimes work better.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/AMFClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/AMFClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\n\ndataset = datasets.Bananas().take(500)\n\nmodel = forest.AMFClassifier(\n    n_estimators=10,\n    use_aggregation=True,\n    dirichlet=0.5,\n    seed=1\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 85.37%\n

    "},{"location":"api/forest/AMFClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/forest/AMFClassifier/#notes","title":"Notes","text":"

    Only the log-loss is currently supported for computing the aggregation weights, namely the log-loss for multi-class classification.

    1. Mourtada, J., Ga\u00efffas, S., & Scornet, E. (2021). AMF: Aggregated Mondrian forests for online learning. Journal of the Royal Statistical Society Series B: Statistical Methodology, 83(3), 505-533.\u00a0\u21a9

    "},{"location":"api/forest/AMFRegressor/","title":"AMFRegressor","text":"

    Aggregated Mondrian Forest regressor for online learning.

    This algorithm is truly online, in the sense that a single pass is performed, and that predictions can be produced anytime.

    Each node in a tree predicts according to the average of the labels it contains. The prediction for a sample is computed as the aggregated predictions of all the subtrees along the path leading to the leaf node containing the sample. The aggregation weights are exponential weights with learning rate step using a squared loss when use_aggregation is True.

    This computation is performed exactly thanks to a context tree weighting algorithm. More details can be found in the original paper1.

    The final predictions are the average of the predictions of each of the n_estimators trees in the forest.

    "},{"location":"api/forest/AMFRegressor/#parameters","title":"Parameters","text":"
    • n_estimators

      Type \u2192 int

      Default \u2192 10

      The number of trees in the forest.

    • step

      Type \u2192 float

      Default \u2192 1.0

      Step-size for the aggregation weights.

    • use_aggregation

      Type \u2192 bool

      Default \u2192 True

      Controls if aggregation is used in the trees. It is highly recommended to leave it as True.

    • seed

      Type \u2192 int

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/AMFRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/AMFRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\n\ndataset = datasets.TrumpApproval()\nmodel = forest.AMFRegressor(seed=42)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.268533\n

    "},{"location":"api/forest/AMFRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Mourtada, J., Ga\u00efffas, S., & Scornet, E. (2021). AMF: Aggregated Mondrian forests for online learning. Journal of the Royal Statistical Society Series B: Statistical Methodology, 83(3), 505-533.\u00a0\u21a9

    "},{"location":"api/forest/ARFClassifier/","title":"ARFClassifier","text":"

    Adaptive Random Forest classifier.

    The 3 most important aspects of Adaptive Random Forest 1 are:

    1. inducing diversity through re-sampling

    2. inducing diversity through randomly selecting subsets of features for node splits

    3. drift detectors per base tree, which cause selective resets in response to drifts

    It also allows training background trees, which start training if a warning is detected and replace the active tree if the warning escalates to a drift.

    "},{"location":"api/forest/ARFClassifier/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of trees in the ensemble.

    • max_features

      Type \u2192 bool | str | int

      Default \u2192 sqrt

      Max number of attributes for each node split:
      - If int, then consider max_features at each split.
      - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split.
      - If \"sqrt\", then max_features=sqrt(n_features).
      - If \"log2\", then max_features=log2(n_features).
      - If None, then max_features=n_features.

    • lambda_value

      Type \u2192 int

      Default \u2192 6

      The lambda value for bagging (lambda=6 corresponds to Leveraging Bagging).

    • metric

      Type \u2192 metrics.base.MultiClassMetric | None

      Default \u2192 None

      Metric used to track trees' performance within the ensemble. Defaults to metrics.Accuracy().

    • disable_weighted_vote

      Default \u2192 False

      If True, disables the weighted vote prediction.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detection method. Set to None to disable drift detection. Defaults to drift.ADWIN(delta=0.001).

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detection method. Set to None to disable warning detection. Defaults to drift.ADWIN(delta=0.01).

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      [Tree parameter] The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

      [Tree parameter] Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

      [Tree parameter] Prediction mechanism used at leafs. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      [Tree parameter] Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      [Tree parameter] The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      [Tree parameter] Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/ARFClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/ARFClassifier/#examples","title":"Examples","text":"

    from river import evaluate\nfrom river import forest\nfrom river import metrics\nfrom river.datasets import synth\n\ndataset = synth.ConceptDriftStream(\n    seed=42,\n    position=500,\n    width=40\n).take(1000)\n\nmodel = forest.ARFClassifier(seed=8, leaf_prediction=\"mc\")\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 71.07%\n

    "},{"location":"api/forest/ARFClassifier/#methods","title":"Methods","text":"learn_one predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Heitor Murilo Gomes, Albert Bifet, Jesse Read, Jean Paul Barddal, Fabricio Enembreck, Bernhard Pfharinger, Geoff Holmes, Talel Abdessalem. Adaptive random forests for evolving data stream classification. In Machine Learning, DOI: 10.1007/s10994-017-5642-8, Springer, 2017.\u00a0\u21a9

    "},{"location":"api/forest/ARFRegressor/","title":"ARFRegressor","text":"

    Adaptive Random Forest regressor.

    The 3 most important aspects of Adaptive Random Forest 1 are:

    1. inducing diversity through re-sampling

    2. inducing diversity through randomly selecting subsets of features for node splits

    3. drift detectors per base tree, which cause selective resets in response to drifts

    Notice that this implementation is slightly different from the original algorithm proposed in 2. The HoeffdingTreeRegressor is used as base learner, instead of FIMT-DD. It also adds a new strategy to monitor the predictions and check for concept drifts. The deviations of the predictions from the target are monitored and normalized to the [0, 1] range to fulfill ADWIN's requirements. We assume that the data subjected to the normalization follows a normal distribution, and thus lies within the interval of the mean \(\pm3\sigma\).
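
    One hedged way to read this normalization (an illustration of the stated assumption, not necessarily the exact internal code): an absolute deviation \(\epsilon\) with running mean \(\mu\) and standard deviation \(\sigma\) can be mapped to \([0, 1]\) as \(\min(1, \max(0, \frac{\epsilon - (\mu - 3\sigma)}{6\sigma}))\), so that values inside \(\mu \pm 3\sigma\) land within the unit interval.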

    "},{"location":"api/forest/ARFRegressor/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of trees in the ensemble.

    • max_features

      Default \u2192 sqrt

      Max number of attributes for each node split:
      - If int, then consider max_features at each split.
      - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split.
      - If \"sqrt\", then max_features=sqrt(n_features).
      - If \"log2\", then max_features=log2(n_features).
      - If None, then max_features=n_features.

    • aggregation_method

      Type \u2192 str

      Default \u2192 median

      The method used to aggregate predictions in the ensemble: either 'mean' or 'median'. If 'median' is selected, the weighted vote is disabled.

    • lambda_value

      Type \u2192 int

      Default \u2192 6

      The lambda value for bagging (lambda=6 corresponds to Leveraging Bagging).

    • metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      Metric used to track trees' performance within the ensemble. Depending on the configuration, this metric is also used to weight predictions from the members of the ensemble. Defaults to metrics.MSE().

    • disable_weighted_vote

      Default \u2192 True

      If True, disables the weighted vote prediction, i.e. does not assign weights to individual trees' predictions and uses the arithmetic mean instead. Otherwise, the metric value is used to weight predictions.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detection method. Set to None to disable drift detection. Defaults to drift.ADWIN(0.001).

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detection method. Set to None to disable warning detection. Defaults to drift.ADWIN(0.01).

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      [Tree parameter] The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

      [Tree parameter] Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      [Tree parameter] The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      [Tree parameter] The exponential decaying factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. Conversely, if its value approaches 0, the most recently observed errors have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      [Tree parameter] The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.EBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      [Tree parameter] The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/ARFRegressor/#attributes","title":"Attributes","text":"
    • models

    • valid_aggregation_method

      Valid aggregation_method values.

    "},{"location":"api/forest/ARFRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    forest.ARFRegressor(seed=42)\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.800378\n

    "},{"location":"api/forest/ARFRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    1. Gomes, H.M., Bifet, A., Read, J., Barddal, J.P., Enembreck, F., Pfharinger, B., Holmes, G. and Abdessalem, T., 2017. Adaptive random forests for evolving data stream classification. Machine Learning, 106(9-10), pp.1469-1495.\u00a0\u21a9

    2. Gomes, H.M., Barddal, J.P., Boiko, L.E., Bifet, A., 2018. Adaptive random forests for data stream regression. ESANN 2018.\u00a0\u21a9

    "},{"location":"api/forest/OXTRegressor/","title":"OXTRegressor","text":"

    Online Extra Trees regressor.

    The online Extra Trees1 ensemble takes randomization some steps further when compared to Adaptive Random Forests (ARF). A subspace of the feature space is considered at each split attempt, as ARF does, and online bagging or subbagging can also (optionally) be used. Nonetheless, Extra Trees randomizes the split candidates evaluated by each leaf node (just a single split candidate is tested per numerical feature, which brings significant speedups to the ensemble), and might also randomize the maximum depth of the forest members, as well as the size of the feature subspace processed by each of its trees' leaves.
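
    A minimal sketch of the randomized split idea (illustrative only; random_split_point is a hypothetical helper, not part of the library): instead of scanning every candidate threshold of a numeric feature, a single threshold is drawn uniformly at random inside the observed feature range:

    import random\n\nrng = random.Random(42)\n\ndef random_split_point(observed_min, observed_max):\n    # Extra Trees style: one random cut point per numeric feature,\n    # instead of evaluating all candidate thresholds.\n    return rng.uniform(observed_min, observed_max)\n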

    On the other hand, OXT suffers from a cold-start problem. As the splits are random, the predictive performance in small samples is usually worse than using a deterministic split approach, such as the one used by ARF.

    "},{"location":"api/forest/OXTRegressor/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      The number of trees in the ensemble.

    • max_features

      Type \u2192 bool | str | int

      Default \u2192 sqrt

      Max number of attributes for each node split:
      - If int, then consider max_features at each split.
      - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split.
      - If \"sqrt\", then max_features=sqrt(n_features).
      - If \"log2\", then max_features=log2(n_features).
      - If \"random\", then max_features will assume a different random number in the interval [2, n_features] for each tree leaf.
      - If None, then max_features=n_features.

    • resampling_strategy

      Type \u2192 str | None

      Default \u2192 subbagging

      The chosen instance resampling strategy:
      - If None, no resampling will be done and the trees will process all instances.
      - If 'bagging', online bagging will be performed (sampling with replacement).
      - If 'subbagging', online subbagging will be performed (sampling without replacement).

    • resampling_rate

      Type \u2192 int | float

      Default \u2192 0.5

      Only valid if resampling_strategy is not None. Controls the parameters of the resampling strategy:
      - If resampling_strategy='bagging', must be an integer greater than or equal to 1 that parameterizes the Poisson distribution used to simulate bagging in online learning settings. It acts as the lambda parameter of Oza Bagging and Leveraging Bagging.
      - If resampling_strategy='subbagging', must be a float in the interval \((0, 1]\) that controls the chance of each instance being used by a tree for learning.

    • detection_mode

      Type \u2192 str

      Default \u2192 all

      The concept drift detection mode in which the forest operates. Valid values are:
      - \"all\": creates both warning and concept drift detectors. If a warning is detected, an alternate tree starts being trained in the background. If the warning trigger escalates to a concept drift, the affected tree is replaced by the alternate tree.
      - \"drop\": only the concept drift detectors are created. If a drift is detected, the affected tree is dropped and replaced by a new tree.
      - \"off\": disables the concept drift adaptation capabilities. The forest will act as if the processed stream is stationary.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The detector that will be used to trigger concept drift warnings. Defaults to drift.ADWIN(0.01).

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The detector used to detect concept drifts. Defaults to drift.ADWIN(0.001).

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth the ensemble members might reach. If None, the trees will grow indefinitely.

    • randomize_tree_depth

      Type \u2192 bool

      Default \u2192 False

      Whether or not to randomize the maximum depth of each tree in the ensemble. If max_depth is provided, it acts as an upper bound when generating the maximum depth of each tree.

    • track_metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      The performance metric used to weight predictions. Defaults to metrics.MAE().

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 True

      Defines whether or not to use predictions weighted by each tree's prediction performance.

    • split_buffer_size

      Type \u2192 int

      Default \u2192 5

      Defines the size of the buffer used by the tree splitters when determining the feature range and a random split point in this interval.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed to support reproducibility.

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

      [Tree parameter] Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      [Tree parameter] The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      [Tree parameter] The exponential decaying factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. Conversely, if its value approaches 0, the most recently observed errors have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      [Tree parameter] The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • max_size

      Type \u2192 int

      Default \u2192 500

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    "},{"location":"api/forest/OXTRegressor/#attributes","title":"Attributes","text":"
    • instances_per_tree

      The number of instances processed by each one of the current forest members. Each time a concept drift is detected, the count corresponding to the affected tree is reset.

    • models

    • n_drifts

      The number of concept drifts detected per ensemble member.

    • n_tree_swaps

      The number of performed alternate tree swaps. Not applicable if the warning detectors are disabled.

    • n_warnings

      The number of warnings detected per ensemble member.

    • total_instances

      The total number of instances processed by the ensemble.

    "},{"location":"api/forest/OXTRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import forest\n\ndataset = datasets.synth.Friedman(seed=42).take(5000)\n\nmodel = forest.OXTRegressor(n_models=3, seed=42)\n\nmetric = metrics.RMSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    RMSE: 3.127311\n

    "},{"location":"api/forest/OXTRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/forest/OXTRegressor/#notes","title":"Notes","text":"

    As the Online Extra Trees change the way in which Hoeffding Trees perform split attempts and monitor numerical input features, some of the parameters of the vanilla Hoeffding Tree algorithms are not available.

    1. Mastelini, S. M., Nakano, F. K., Vens, C., & de Leon Ferreira, A. C. P. (2022). Online Extra Trees Regressor. IEEE Transactions on Neural Networks and Learning Systems.\u00a0\u21a9

    "},{"location":"api/imblearn/ChebyshevOverSampler/","title":"ChebyshevOverSampler","text":"

    Over-sampling for imbalanced regression using Chebyshev's inequality.

    Chebyshev's inequality can be used to define the probability of target observations being frequent values (w.r.t. the distribution mean).

    Let \\(Y\\) be a random variable with finite expected value \\(\\overline{y}\\) and non-zero variance \\(\\sigma^2\\). For any real number \\(t > 0\\), the Chebyshev's inequality states that, for a wide class of unimodal probability distributions: \\(Pr(|y-\\overline{y}| \\ge t\\sigma) \\le \\dfrac{1}{t^2}\\).

    Taking \\(t=\\dfrac{|y-\\overline{y}|}{\\sigma}\\), and assuming \\(t > 1\\), the Chebyshev\u2019s inequality for an observation \\(y\\) becomes: \\(P(|y - \\overline{y}|=t) = \\dfrac{\\sigma^2}{|y-\\overline{y}|}\\).

    Alternatively, one can use \(t\) directly to estimate a frequency weight \(\kappa = \lceil t\rceil\) and define an over-sampling strategy for extreme and rare target values1. Each incoming instance is used \(\kappa\) times to update the underlying regressor. Frequent target values contribute only once to the underlying regressor, whereas rare cases are used multiple times for training.
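
    Below is a minimal sketch of how such a frequency weight could be computed from running statistics (an illustration under the definitions above, not the library's internal code), assuming river's stats.Mean and stats.Var:

    import math\nfrom river import stats\n\nmean, var = stats.Mean(), stats.Var()\n\ndef kappa(y):\n    # t = |y - mean| / sigma; rarer target values yield a larger t.\n    sigma = var.get() ** 0.5\n    t = abs(y - mean.get()) / sigma if sigma > 0 else 0.0\n    # Frequent values (t <= 1) train once, rare ones train ceil(t) times.\n    return max(1, math.ceil(t))\n\nfor y in [10.0, 10.5, 9.8, 25.0]:\n    k = kappa(y)  # how many times this instance would be replayed\n    mean.update(y)\n    var.update(y)\n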

    "},{"location":"api/imblearn/ChebyshevOverSampler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regression model that will receive the biased sample.

    "},{"location":"api/imblearn/ChebyshevOverSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.ChebyshevOverSampler(\n        regressor=rules.AMRules(\n            n_min=50, delta=0.01\n        )\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 1.673902\n[1,000] MAE: 1.743046\n[1,001] MAE: 1.741335\nMAE: 1.741335\n

    "},{"location":"api/imblearn/ChebyshevOverSampler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    1. Aminian, Ehsan, Rita P. Ribeiro, and Jo\u00e3o Gama. \"Chebyshev approaches for imbalanced data streams regression models.\" Data Mining and Knowledge Discovery 35.6 (2021): 2389-2466.\u00a0\u21a9

    "},{"location":"api/imblearn/ChebyshevUnderSampler/","title":"ChebyshevUnderSampler","text":"

    Under-sampling for imbalanced regression using Chebyshev's inequality.

    Chebyshev's inequality can be used to define the probability of target observations being frequent values (w.r.t. the distribution mean).

    Let \\(Y\\) be a random variable with finite expected value \\(\\overline{y}\\) and non-zero variance \\(\\sigma^2\\). For any real number \\(t > 0\\), the Chebyshev's inequality states that, for a wide class of unimodal probability distributions: \\(Pr(|y-\\overline{y}| \\ge t\\sigma) \\le \\dfrac{1}{t^2}\\).

    Taking \\(t=\\dfrac{|y-\\overline{y}|}{\\sigma}\\), and assuming \\(t > 1\\), the Chebyshev\u2019s inequality for an observation \\(y\\) becomes: \\(P(|y - \\overline{y}|=t) = \\dfrac{\\sigma^2}{|y-\\overline{y}|}\\). The reciprocal of this probability is used for under-sampling1 the most frequent cases. Extreme valued or rare cases have higher probabilities of selection, whereas the most frequent cases are likely to be discarded. Still, frequent cases have a small chance of being selected (controlled via the sp parameter) in case few rare instances were observed.

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regression model that will receive the biased sample.

    • sp

      Type \u2192 float

      Default \u2192 0.15

      Second chance probability. Even if an example is not initially selected for training, it still has a small chance of being selected in case the number of rare cases observed so far is small.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed to support reproducibility.

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.ChebyshevUnderSampler(\n        regressor=rules.AMRules(\n            n_min=50, delta=0.01,\n        ),\n        seed=42\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 1.787162\n[1,000] MAE: 1.515711\n[1,001] MAE: 1.515236\nMAE: 1.515236\n

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    1. Aminian, Ehsan, Rita P. Ribeiro, and Jo\u00e3o Gama. \"Chebyshev approaches for imbalanced data streams regression models.\" Data Mining and Knowledge Discovery 35.6 (2021): 2389-2466.\u00a0\u21a9

    "},{"location":"api/imblearn/HardSamplingClassifier/","title":"HardSamplingClassifier","text":"

    Hard sampling classifier.

    This wrapper enables a model to retrain on past samples whose output was hard to predict. This works by storing the hardest samples in a buffer of fixed size. When a new sample arrives, the wrapped model is either trained on one of the buffered samples with probability p, or on the new sample with probability (1 - p).

    The hardness of an observation is evaluated with a loss function that compares the sample's ground truth with the wrapped model's prediction. If the buffer is not full, the sample is added to the buffer. If the buffer is full and the new sample has a bigger loss than the lowest loss in the buffer, then the new sample takes its place.
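
    A minimal sketch of this buffer rule (an illustration, not the library's internals): keep the size hardest samples in a min-heap ordered by loss, so the easiest buffered sample is always the one evicted first:

    import heapq\nimport itertools\n\nbuffer, size, tie = [], 40, itertools.count()\n\ndef maybe_store(loss, x, y):\n    # The tie counter prevents comparing feature dicts when losses are equal.\n    entry = (loss, next(tie), x, y)\n    if len(buffer) < size:\n        heapq.heappush(buffer, entry)\n    elif loss > buffer[0][0]:\n        # Harder than the easiest buffered sample: replace it.\n        heapq.heapreplace(buffer, entry)\n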

    "},{"location":"api/imblearn/HardSamplingClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • size

      Type \u2192 int

      Size of the buffer.

    • p

      Type \u2192 float

      Probability of updating the model with a sample from the buffer instead of a new incoming sample.

    • loss

      Type \u2192 optim.losses.BinaryLoss | optim.losses.MultiClassLoss | None

      Default \u2192 None

      Criterion used to evaluate the hardness of a sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed.

    "},{"location":"api/imblearn/HardSamplingClassifier/#attributes","title":"Attributes","text":"
    • classifier
    "},{"location":"api/imblearn/HardSamplingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.HardSamplingClassifier(\n        classifier=linear_model.LogisticRegression(),\n        p=0.1,\n        size=40,\n        seed=42,\n    )\n)\n\nevaluate.progressive_val_score(\n    dataset=datasets.Phishing(),\n    model=model,\n    metric=metrics.ROCAUC(),\n    print_every=500,\n)\n
    [500] ROCAUC: 92.78%\n[1,000] ROCAUC: 94.76%\n[1,250] ROCAUC: 95.06%\nROCAUC: 95.06%\n

    "},{"location":"api/imblearn/HardSamplingClassifier/#methods","title":"Methods","text":"learn_one predict_one predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/HardSamplingRegressor/","title":"HardSamplingRegressor","text":"

    Hard sampling regressor.

    This wrapper enables a model to retrain on past samples whose output was hard to predict. This works by storing the hardest samples in a buffer of fixed size. When a new sample arrives, the wrapped model is either trained on one of the buffered samples with probability p, or on the new sample with probability (1 - p).

    The hardness of an observation is evaluated with a loss function that compares the sample's ground truth with the wrapped model's prediction. If the buffer is not full, the sample is added to the buffer. If the buffer is full and the new sample has a bigger loss than the lowest loss in the buffer, then the new sample takes its place.

    "},{"location":"api/imblearn/HardSamplingRegressor/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

    • size

      Type \u2192 int

      Size of the buffer.

    • p

      Type \u2192 float

      Probability of updating the model with a sample from the buffer instead of a new incoming sample.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      Criterion used to evaluate the hardness of a sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed.

    "},{"location":"api/imblearn/HardSamplingRegressor/#attributes","title":"Attributes","text":"
    • regressor
    "},{"location":"api/imblearn/HardSamplingRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.HardSamplingRegressor(\n        regressor=linear_model.LinearRegression(),\n        p=.2,\n        size=30,\n        seed=42,\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 2.274021\n[1,000] MAE: 1.392399\n[1,001] MAE: 1.391246\nMAE: 1.391246\n

    "},{"location":"api/imblearn/HardSamplingRegressor/#methods","title":"Methods","text":"learn_one predict_one"},{"location":"api/imblearn/RandomOverSampler/","title":"RandomOverSampler","text":"

    Random over-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by over-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution. The implementation is a discrete version of reverse rejection sampling.

    See Working with imbalanced data for example usage.
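
    One plausible reading of this scheme (a hedged sketch, not necessarily the library's exact internals): the desired-to-observed ratio of each class is normalized by the smallest ratio, and each observation is then replayed roughly that many times:

    import collections\nimport math\nimport random\n\nrng = random.Random(42)\ndesired = {False: 0.4, True: 0.6}\ncounts = collections.Counter()\n\ndef n_repeats(y):\n    counts[y] += 1\n    n = sum(counts.values())\n    actual = {c: counts[c] / n for c in counts}\n    ratios = {c: desired[c] / actual[c] for c in actual}\n    rate = ratios[y] / min(ratios.values())\n    # Replay floor(rate) times, plus once more with probability frac(rate).\n    return math.floor(rate) + (rng.random() < rate - math.floor(rate))\n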

    "},{"location":"api/imblearn/RandomOverSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomOverSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomOverSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.0457...\n

    "},{"location":"api/imblearn/RandomOverSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/RandomSampler/","title":"RandomSampler","text":"

    Random sampling by mixing under-sampling and over-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by both under-sampling and over-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution.

    See Working with imbalanced data for example usage.

    "},{"location":"api/imblearn/RandomSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1. If set to None, then the observations will be sampled uniformly at random, which is strictly equivalent to using ensemble.BaggingClassifier.

    • sampling_rate

      Default \u2192 1.0

      The desired ratio of data to sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    sampling_rate=0.8,\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.09...\n

    "},{"location":"api/imblearn/RandomSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/RandomUnderSampler/","title":"RandomUnderSampler","text":"

    Random under-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by under-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution. The implementation is a discrete version of rejection sampling.

    See Working with imbalanced data for example usage.
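
    A hedged sketch of this discrete rejection sampling (illustrative, not the exact internals): each observation of class c is kept with probability proportional to desired(c) / observed(c), with the largest ratio normalized to one:

    import collections\nimport random\n\nrng = random.Random(42)\ndesired = {False: 0.4, True: 0.6}\ncounts = collections.Counter()\n\ndef keep(y):\n    counts[y] += 1\n    n = sum(counts.values())\n    actual = {c: counts[c] / n for c in counts}\n    ratios = {c: desired[c] / actual[c] for c in actual}\n    # Accept with probability ratio / max ratio, so over-represented\n    # classes are rejected more often.\n    return rng.random() < ratios[y] / max(ratios.values())\n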

    "},{"location":"api/imblearn/RandomUnderSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomUnderSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomUnderSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.0336...\n

    "},{"location":"api/imblearn/RandomUnderSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Under-sampling a dataset with desired ratios \u21a9

    2. Wikipedia article on rejection sampling \u21a9

    "},{"location":"api/linear-model/ALMAClassifier/","title":"ALMAClassifier","text":"

    Approximate Large Margin Algorithm (ALMA).

    "},{"location":"api/linear-model/ALMAClassifier/#parameters","title":"Parameters","text":"
    • p

      Default \u2192 2

    • alpha

      Default \u2192 0.9

    • B

      Default \u2192 1.1111111111111112

    • C

      Default \u2192 1.4142135623730951

    "},{"location":"api/linear-model/ALMAClassifier/#attributes","title":"Attributes","text":"
    • w (collections.defaultdict)

      The current weights.

    • k (int)

      The number of instances seen during training.

    "},{"location":"api/linear-model/ALMAClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.ALMAClassifier()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 82.56%\n

    "},{"location":"api/linear-model/ALMAClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Gentile, Claudio. \"A new approximate maximal margin classification algorithm.\" Journal of Machine Learning Research 2.Dec (2001): 213-242 \u21a9

    "},{"location":"api/linear-model/BayesianLinearRegression/","title":"BayesianLinearRegression","text":"

    Bayesian linear regression.

    An advantage of Bayesian linear regression over standard linear regression is that features do not have to be scaled beforehand. Another attractive property is that this flavor of linear regression is somewhat insensitive to its hyperparameters. Finally, this model can output a predictive distribution rather than just a point estimate.

    The downside is that the learning step runs in O(n^2) time, where n is the number of features, whereas the learning step of standard linear regression takes O(n) time.
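
    For reference, the underlying closed-form update keeps a Gaussian posterior over the weights. Following the Bishop reference below (a sketch of the textbook equations; the streaming implementation may organise the computation differently), with prior parameter \(\alpha\) and noise parameter \(\beta\):

    \[ S_n^{-1} = \alpha I + \beta \Phi^\top \Phi, \qquad m_n = \beta S_n \Phi^\top y \]

    The predictive distribution for a new input \(x\) is then \(\mathcal{N}(m_n^\top x, \, \beta^{-1} + x^\top S_n x)\), which is what predict_one returns when with_dist=True.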

    "},{"location":"api/linear-model/BayesianLinearRegression/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1

      Prior parameter.

    • beta

      Default \u2192 1

      Noise parameter.

    • smoothing

      Type \u2192 float

      Default \u2192 None

      Smoothing allows the model to gradually \"forget\" the past, and focus on the more recent data. It thus enables the model to deal with concept drift. Due to the current implementation, activating smoothing may slow down the model.

    "},{"location":"api/linear-model/BayesianLinearRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.TrumpApproval()\nmodel = linear_model.BayesianLinearRegression()\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.586...\n

    x, _ = next(iter(dataset))\nmodel.predict_one(x)\n
    43.852...\n

    model.predict_one(x, with_dist=True)\n
    \ud835\udca9(\u03bc=43.85..., \u03c3=1.00...)\n

    The smoothing parameter can be set to make the model robust to drift. The parameter is expected to be between 0 and 1. To exemplify, let's generate some simulation data with an abrupt concept drift right in the middle.

    import itertools\nimport random\n\ndef random_data(coefs, n, seed=42):\n    rng = random.Random(seed)\n    for _ in range(n):\n        x = {i: rng.random() for i, c in enumerate(coefs)}\n        y = sum(c * xi for c, xi in zip(coefs, x.values()))\n        yield x, y\n

    Here's how the model performs without any smoothing:

    model = linear_model.BayesianLinearRegression()\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.284...\n

    And here's how it performs with some smoothing:

    model = linear_model.BayesianLinearRegression(smoothing=0.8)\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.159...\n

    Smoothing allows the model to gradually \"forget\" the past, and focus on the more recent data.

    Note how this works better than standard linear regression, even when using an aggressive learning rate.

    from river import optim\nmodel = linear_model.LinearRegression(optimizer=optim.SGD(0.5))\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.242...\n

    "},{"location":"api/linear-model/BayesianLinearRegression/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'
    • with_dist \u2014 defaults to False

    Returns

    base.typing.RegTarget: The prediction.

    1. Pattern Recognition and Machine Learning, page 52 \u2014 Christopher M. Bishop \u21a9

    2. Bayesian/Streaming Algorithms \u2014 Vincent Warmerdam \u21a9

    3. Bayesian linear regression for practitioners \u2014 Max Halford \u21a9

    "},{"location":"api/linear-model/LinearRegression/","title":"LinearRegression","text":"

    Linear regression.

    This estimator supports learning with mini-batches. On top of the single instance methods, it provides the following methods: learn_many, predict_many. Each method takes as input a pandas.DataFrame where each column represents a feature.

    It is generally a good idea to scale the data beforehand in order for the optimizer to converge. You can do this online with a preprocessing.StandardScaler.
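
    As a quick sketch of the mini-batch API (the column names here are made up for illustration):

    import pandas as pd\nfrom river import linear_model\n\nmodel = linear_model.LinearRegression()\n\nX = pd.DataFrame({'x1': [1.0, 2.0, 3.0], 'x2': [0.5, 0.1, 0.9]})\ny = pd.Series([1.0, 2.0, 3.0])\n\nmodel = model.learn_many(X, y)  # one mini-batch update\nmodel.predict_many(X)  # a pd.Series of predictions, indexed like X\n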

    "},{"location":"api/linear-model/LinearRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights. Note that the intercept updates are handled separately.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.base.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/LinearRegression/#attributes","title":"Attributes","text":"
    • weights (dict)

      The current weights.

    "},{"location":"api/linear-model/LinearRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(intercept_lr=.1)\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.558735\n

    model['LinearRegression'].intercept\n
    35.617670\n

    You can call the debug_one method to break down a prediction. This works even if the linear regression is part of a pipeline.

    x, y = next(iter(dataset))\nreport = model.debug_one(x)\nprint(report)\n
    0. Input\n--------\ngallup: 43.84321 (float)\nipsos: 46.19925 (float)\nmorning_consult: 48.31875 (float)\nordinal_date: 736389 (int)\nrasmussen: 44.10469 (float)\nyou_gov: 43.63691 (float)\n<BLANKLINE>\n1. StandardScaler\n-----------------\ngallup: 1.18810 (float)\nipsos: 2.10348 (float)\nmorning_consult: 2.73545 (float)\nordinal_date: -1.73032 (float)\nrasmussen: 1.26872 (float)\nyou_gov: 1.48391 (float)\n<BLANKLINE>\n2. LinearRegression\n-------------------\nName              Value      Weight      Contribution\n      Intercept    1.00000    35.61767       35.61767\n          ipsos    2.10348     0.62689        1.31866\nmorning_consult    2.73545     0.24180        0.66144\n         gallup    1.18810     0.43568        0.51764\n      rasmussen    1.26872     0.28118        0.35674\n        you_gov    1.48391     0.03123        0.04634\n   ordinal_date   -1.73032     3.45162       -5.97242\n<BLANKLINE>\nPrediction: 32.54607\n

    "},{"location":"api/linear-model/LinearRegression/#methods","title":"Methods","text":"debug_one

    Debugs the output of the linear regression.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_many

    Update the model with a mini-batch of features X and real-valued targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchRegressor: self

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X

    Returns

    The predicted outcomes.

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/linear-model/LogisticRegression/","title":"LogisticRegression","text":"

    Logistic regression.

    This estimator supports learning with mini-batches. On top of the single instance methods, it provides the following methods: learn_many, predict_many, predict_proba_many. Each method takes as input a pandas.DataFrame where each column represents a feature.

    It is generally a good idea to scale the data beforehand in order for the optimizer to converge. You can do this online with a preprocessing.StandardScaler.
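
    For instance, a minimal mini-batch sketch (feature names made up for illustration):

    import pandas as pd\nfrom river import linear_model\n\nmodel = linear_model.LogisticRegression()\n\nX = pd.DataFrame({'x1': [1.0, -1.0], 'x2': [0.3, -0.2]})\ny = pd.Series([True, False])\n\nmodel = model.learn_many(X, y)\nmodel.predict_proba_many(X)  # a pd.DataFrame with one column per label\n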

    "},{"location":"api/linear-model/LogisticRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights. Note that the intercept is handled separately.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for. Defaults to optim.losses.Log.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 float | optim.base.Scheduler

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.base.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/LogisticRegression/#attributes","title":"Attributes","text":"
    • weights

      The current weights.

    "},{"location":"api/linear-model/LogisticRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer=optim.SGD(.1))\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.96%\n

    "},{"location":"api/linear-model/LogisticRegression/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/linear-model/PAClassifier/","title":"PAClassifier","text":"

    Passive-aggressive learning for classification.
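
    For a weight vector \(w_t\), a sample \(x_t\) and a label \(y_t \in \{-1, +1\}\), the PA-I update from the Crammer et al. reference below reads as follows (the mode parameter presumably selects between the PA, PA-I and PA-II variants):

    \[ w_{t+1} = w_t + \tau_t y_t x_t, \qquad \tau_t = \min\left(C, \frac{\ell_t}{\lVert x_t \rVert^2}\right), \qquad \ell_t = \max(0, 1 - y_t (w_t \cdot x_t)) \]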

    "},{"location":"api/linear-model/PAClassifier/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 1.0

    • mode

      Default \u2192 1

    • learn_intercept

      Default \u2192 True

    "},{"location":"api/linear-model/PAClassifier/#examples","title":"Examples","text":"

    The following example is taken from this blog post.

    from river import linear_model\nfrom river import metrics\nfrom river import stream\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn import model_selection\n\nnp.random.seed(1000)\nX, y = datasets.make_classification(\n    n_samples=5000,\n    n_features=4,\n    n_informative=2,\n    n_redundant=0,\n    n_repeated=0,\n    n_classes=2,\n    n_clusters_per_class=2\n)\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(\n    X,\n    y,\n    test_size=0.35,\n    random_state=1000\n)\n\nmodel = linear_model.PAClassifier(\n    C=0.01,\n    mode=1\n)\n\nfor xi, yi in stream.iter_array(X_train, y_train):\n    model = model.learn_one(xi, yi)\n\nmetric = metrics.Accuracy() + metrics.LogLoss()\n\nfor xi, yi in stream.iter_array(X_test, y_test):\n    metric = metric.update(yi, model.predict_proba_one(xi))\n\nprint(metric)\n
    Accuracy: 88.46%, LogLoss: 0.325727\n

    "},{"location":"api/linear-model/PAClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Crammer, K., Dekel, O., Keshet, J., Shalev-Shwartz, S. and Singer, Y., 2006. Online passive-aggressive algorithms. Journal of Machine Learning Research, 7(Mar), pp.551-585 \u21a9

    "},{"location":"api/linear-model/PARegressor/","title":"PARegressor","text":"

    Passive-aggressive learning for regression.

    "},{"location":"api/linear-model/PARegressor/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 1.0

    • mode

      Default \u2192 1

    • eps

      Default \u2192 0.1

    • learn_intercept

      Default \u2192 True

    "},{"location":"api/linear-model/PARegressor/#examples","title":"Examples","text":"

    The following example is taken from this blog post.

    from river import linear_model\nfrom river import metrics\nfrom river import stream\nimport numpy as np\nfrom sklearn import datasets\n\nnp.random.seed(1000)\nX, y = datasets.make_regression(n_samples=500, n_features=4)\n\nmodel = linear_model.PARegressor(\n    C=0.01,\n    mode=2,\n    eps=0.1,\n    learn_intercept=False\n)\nmetric = metrics.MAE() + metrics.MSE()\n\nfor xi, yi in stream.iter_array(X, y):\n    y_pred = model.predict_one(xi)\n    model = model.learn_one(xi, yi)\n    metric = metric.update(yi, y_pred)\n\nprint(metric)\n
    MAE: 9.809402, MSE: 472.393532\n

    "},{"location":"api/linear-model/PARegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Crammer, K., Dekel, O., Keshet, J., Shalev-Shwartz, S. and Singer, Y., 2006. Online passive-aggressive algorithms. Journal of Machine Learning Research, 7(Mar), pp.551-585. \u21a9

    "},{"location":"api/linear-model/Perceptron/","title":"Perceptron","text":"

    Perceptron classifier.

    In this implementation, the Perceptron is viewed as a special case of logistic regression. The loss function used is the hinge loss with a threshold set to 0, whilst the learning rate of the stochastic gradient descent procedure is set to 1 for both the weights and the intercept.
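
    Under that view, a roughly equivalent model can be assembled by hand. This is a sketch meant to illustrate the relationship, not necessarily what the class does internally:

    from river import linear_model\nfrom river import optim\n\n# A Perceptron seen as logistic regression with a hinge loss thresholded\n# at 0 and a constant learning rate of 1 for weights and intercept.\nmodel = linear_model.LogisticRegression(\n    optimizer=optim.SGD(1),\n    loss=optim.losses.Hinge(threshold=0),\n    intercept_lr=1,\n)\n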

    "},{"location":"api/linear-model/Perceptron/#parameters","title":"Parameters","text":"
    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/Perceptron/#attributes","title":"Attributes","text":"
    • weights

      The current weights.

    "},{"location":"api/linear-model/Perceptron/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = pp.StandardScaler() | lm.Perceptron()\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 85.84%\n

    "},{"location":"api/linear-model/Perceptron/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/linear-model/SoftmaxRegression/","title":"SoftmaxRegression","text":"

    Softmax regression is a generalization of logistic regression to multiple classes.

    Softmax regression is also known as "multinomial logistic regression". There is a set of weights for each class, hence the weights attribute is a nested collections.defaultdict. The main advantage of using this instead of a one-vs-all logistic regression is that the probabilities will be calibrated. Moreover, softmax regression is more robust to outliers.
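
    Concretely, with one weight vector \(w_k\) per class \(k\), the predicted probabilities follow the softmax function:

    \[ p(y = k \mid x) = \frac{\exp(w_k \cdot x)}{\sum_{j} \exp(w_j \cdot x)} \]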

    "},{"location":"api/linear-model/SoftmaxRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used to tune the weights.

    • loss

      Type \u2192 optim.losses.MultiClassLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0

      Amount of L2 regularization used to push weights towards 0.

    "},{"location":"api/linear-model/SoftmaxRegression/#attributes","title":"Attributes","text":"
    • weights (collections.defaultdict)
    "},{"location":"api/linear-model/SoftmaxRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nmodel = preprocessing.StandardScaler()\nmodel |= linear_model.SoftmaxRegression()\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 81.88%\n

    "},{"location":"api/linear-model/SoftmaxRegression/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Course on classification stochastic gradient descent \u21a9

    2. Binary vs. Multi-Class Logistic Regression \u21a9

    "},{"location":"api/linear-model/base/GLM/","title":"GLM","text":"

    Generalized Linear Model.

    This serves as a base class for linear and logistic regression.

    "},{"location":"api/linear-model/base/GLM/#parameters","title":"Parameters","text":"
    • optimizer

      The sequential optimizer used for updating the weights. Note that the intercept updates are handled separately.

    • loss

      The loss function to optimize for.

    • l2

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Initial intercept value.

    • intercept_lr

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Clips the absolute value of each gradient value.

    • initializer

      Weights initialization scheme.

    "},{"location":"api/linear-model/base/GLM/#attributes","title":"Attributes","text":"
    • weights
    "},{"location":"api/linear-model/base/GLM/#methods","title":"Methods","text":"learn_many learn_one"},{"location":"api/metrics/Accuracy/","title":"Accuracy","text":"

    Accuracy score, which is the percentage of exact matches.

    "},{"location":"api/metrics/Accuracy/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Accuracy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Accuracy/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Accuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    Accuracy: 60.00%\n

    "},{"location":"api/metrics/Accuracy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/AdjustedMutualInfo/","title":"AdjustedMutualInfo","text":"

    Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information score that accounts for chance. It corrects the effect of agreement solely due to chance between clusterings, similar to the way the Adjusted Rand Index corrects the Rand Index. It is closely related to variation of information. The adjusted measure, however, is no longer a true metric.

    For two clusterings \\(U\\) and \\(V\\), the Adjusted Mutual Information is calculated as:

    \\[ AMI(U, V) = \\frac{MI(U, V) - E(MI(U, V))}{avg(H(U), H(V)) - E(MI(U, V))} \\]

    This metric is independent of the permutation of the class or cluster label values; furthermore, it is also symmetric. This can be useful to measure the agreement of two label assignments strategies on the same dataset, regardless of the ground truth.

    However, due to the complexity of the Expected Mutual Information term, computing this metric is generally an order of magnitude slower than most other metrics.

    "},{"location":"api/metrics/AdjustedMutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • average_method

      Default \u2192 arithmetic

      This parameter defines how to compute the normalizer in the denominator. Possible options include min, max, arithmetic and geometric.

    "},{"location":"api/metrics/AdjustedMutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/AdjustedMutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.AdjustedMutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.0\n0.105891\n0.298792\n

    metric\n
    AdjustedMutualInfo: 0.298792\n

    "},{"location":"api/metrics/AdjustedMutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/AdjustedRand/","title":"AdjustedRand","text":"

    Adjusted Rand Index.

    The Adjusted Rand Index is the corrected-for-chance version of the Rand Index 1 2. Such a correction for chance establishes a baseline by using the expected similarity of all pair-wise comparisons between clusterings specified by a random model.

    Traditionally, the Rand Index was corrected using the Permutation Model for Clustering. However, the premises of the permutation model are frequently violated; in many clustering scenarios, either the number of clusters or the size distribution of those clusters varies drastically. Variations of the adjusted Rand Index account for different models of random clusterings.

    Though the Rand Index may only yield a value between 0 and 1, the Adjusted Rand Index can yield negative values if the index is less than the expected index.
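
    The correction takes the usual chance-normalized form, where \(E[RI]\) is the expected Rand Index under the random model:

    \[ ARI = \frac{RI - E[RI]}{\max(RI) - E[RI]} \]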

    "},{"location":"api/metrics/AdjustedRand/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/AdjustedRand/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/AdjustedRand/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.AdjustedRand()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.0\n0.09090909090909091\n0.24242424242424243\n

    metric\n
    AdjustedRand: 0.242424\n

    "},{"location":"api/metrics/AdjustedRand/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, January 13). Rand index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Rand_index&oldid=1000098911\u00a0\u21a9

    2. W. M. Rand (1971). \"Objective criteria for the evaluation of clustering methods\". Journal of the American Statistical Association. American Statistical Association. 66 (336): 846\u2013850. arXiv:1704.01036. doi:10.2307/2284239. JSTOR 2284239.\u00a0\u21a9

    "},{"location":"api/metrics/BalancedAccuracy/","title":"BalancedAccuracy","text":"

    Balanced accuracy.

    Balanced accuracy is the average of recall obtained on each class. It is used to deal with imbalanced datasets in binary and multi-class classification problems.
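
    In the binary case this boils down to the mean of the true positive rate and the true negative rate:

    \[ bacc = \frac{1}{2} \left( \frac{TP}{TP + FN} + \frac{TN}{TN + FP} \right) \]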

    "},{"location":"api/metrics/BalancedAccuracy/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/BalancedAccuracy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/BalancedAccuracy/#examples","title":"Examples","text":"

    from river import metrics\ny_true = [True, False, True, True, False, True]\ny_pred = [True, False, True, True, True, False]\n\nmetric = metrics.BalancedAccuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    BalancedAccuracy: 62.50%\n

    y_true = [0, 1, 0, 0, 1, 0]\ny_pred = [0, 1, 0, 0, 0, 1]\nmetric = metrics.BalancedAccuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    BalancedAccuracy: 62.50%\n

    "},{"location":"api/metrics/BalancedAccuracy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/ClassificationReport/","title":"ClassificationReport","text":"

    A report for monitoring a classifier.

    This class maintains a set of metrics and updates each of them every time update is called. You can print this class at any time during a model's lifetime to get a tabular visualization of various metrics.

    You can wrap a metrics.ClassificationReport with utils.Rolling in order to obtain a classification report over a window of observations. You can also wrap it with utils.TimeRolling to obtain a report over a period of time.
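
    A minimal sketch of such a wrapping (assuming utils.Rolling takes the metric and a window_size, as elsewhere in River):

    from river import metrics\nfrom river import utils\n\n# Classification report restricted to the last 100 observations.\nrolling_report = utils.Rolling(metrics.ClassificationReport(), window_size=100)\n\nrolling_report.update('cat', 'cat')  # same update interface as the raw report\n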

    "},{"location":"api/metrics/ClassificationReport/#parameters","title":"Parameters","text":"
    • decimals

      Default \u2192 2

      The number of decimals to display in each cell.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/ClassificationReport/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/ClassificationReport/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['pear', 'apple', 'banana', 'banana', 'banana']\ny_pred = ['apple', 'pear', 'banana', 'banana', 'apple']\n\nreport = metrics.ClassificationReport()\n\nfor yt, yp in zip(y_true, y_pred):\n    report = report.update(yt, yp)\n\nprint(report)\n
                   Precision   Recall   F1       Support\n<BLANKLINE>\n   apple       0.00%    0.00%    0.00%         1\n  banana     100.00%   66.67%   80.00%         3\n    pear       0.00%    0.00%    0.00%         1\n<BLANKLINE>\n   Macro      33.33%   22.22%   26.67%\n   Micro      40.00%   40.00%   40.00%\nWeighted      60.00%   40.00%   48.00%\n<BLANKLINE>\n                 40.00% accuracy\n

    "},{"location":"api/metrics/ClassificationReport/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/CohenKappa/","title":"CohenKappa","text":"

    Cohen's Kappa score.

    Cohen's Kappa expresses the level of agreement between two annotators on a classification problem. It is defined as

    \\[ \\kappa = (p_o - p_e) / (1 - p_e) \\]

    where \\(p_o\\) is the empirical probability of agreement on the label assigned to any sample (prequential accuracy), and \\(p_e\\) is the expected agreement when both annotators assign labels randomly.

    "},{"location":"api/metrics/CohenKappa/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/CohenKappa/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/CohenKappa/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']\n\nmetric = metrics.CohenKappa()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    CohenKappa: 42.86%\n

    "},{"location":"api/metrics/CohenKappa/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. J. Cohen (1960). \"A coefficient of agreement for nominal scales\". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104.\u00a0\u21a9

    "},{"location":"api/metrics/Completeness/","title":"Completeness","text":"

    Completeness Score.

    Completeness 1 is symmetrical to homogeneity. In order to satisfy the completeness criteria, a clustering must assign all of those datapoints that are members of a single class to a single cluster. To evaluate completeness, we examine the distribution of cluster assignments within each class. In a perfectly complete clustering solution, each of these distributions will be completely skewed to a single cluster.

    We can evaluate this degree of skew by calculating the conditional entropy of the proposed cluster distribution given the class of the component data points. However, in the worst case scenario, each class is represented by every cluster with a distribution equal to the distribution of cluster sizes. Therefore, symmetric to the calculation above, we define completeness as:

    \\[ c = \\begin{cases} 1 if H(K) = 0, \\\\ 1 - \\frac{H(K|C)}{H(K)} otherwise. \\end{cases}. \\]"},{"location":"api/metrics/Completeness/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Completeness/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Completeness/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.Completeness()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n1.0\n0.3836885465963443\n0.5880325916843805\n0.6666666666666667\n

    metric\n
    Completeness: 66.67%\n

    "},{"location":"api/metrics/Completeness/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410 - 420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/ConfusionMatrix/","title":"ConfusionMatrix","text":"

    Confusion Matrix for binary and multi-class classification.

    "},{"location":"api/metrics/ConfusionMatrix/#parameters","title":"Parameters","text":"
    • classes

      Default \u2192 None

      The initial set of classes. This is optional and serves only for display purposes.

    "},{"location":"api/metrics/ConfusionMatrix/#attributes","title":"Attributes","text":"
    • classes

    • total_false_negatives

    • total_false_positives

    • total_true_negatives

    • total_true_positives

    "},{"location":"api/metrics/ConfusionMatrix/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']\n\ncm = metrics.ConfusionMatrix()\n\nfor yt, yp in zip(y_true, y_pred):\n    cm = cm.update(yt, yp)\n\ncm\n
           ant  bird   cat\n ant     2     0     0\nbird     0     0     1\n cat     1     0     2\n

    cm['bird']['cat']\n
    1.0\n

    "},{"location":"api/metrics/ConfusionMatrix/#methods","title":"Methods","text":"false_negatives false_positives revert support true_negatives true_positives update"},{"location":"api/metrics/ConfusionMatrix/#notes","title":"Notes","text":"

    This confusion matrix is a 2D matrix of shape (n_classes, n_classes), corresponding to a single-target (binary and multi-class) classification task.

    Each row represents true (actual) class-labels, while each column corresponds to the predicted class-labels. For example, an entry in position [1, 2] means that the true class-label is 1, and the predicted class-label is 2 (incorrect prediction).

    This structure is used to keep updated statistics about a single-output classifier's performance and to compute multiple evaluation metrics.
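
    Because several metrics can read from one matrix through their cm parameter, the counts only have to be maintained once. A minimal sketch:

    from river import metrics\n\ncm = metrics.ConfusionMatrix()\n\n# Both metrics share the same underlying counts.\nprecision = metrics.Precision(cm=cm)\nrecall = metrics.Recall(cm=cm)\n\nprecision.update(True, True)  # updates the shared matrix once\nrecall.get()  # already reflects that update\n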

    "},{"location":"api/metrics/CrossEntropy/","title":"CrossEntropy","text":"

    Multiclass generalization of the logarithmic loss.
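
    In other words, it averages the negative log-probability assigned to the true class of each sample, where \(p_i(y_i)\) is the probability the model assigns to the true label of the \(i\)-th sample:

    \[ CE = -\frac{1}{n} \sum_{i=1}^{n} \log p_i(y_i) \]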

    "},{"location":"api/metrics/CrossEntropy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/CrossEntropy/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2]\ny_pred = [\n    {0: 0.29450637, 1: 0.34216758, 2: 0.36332605},\n    {0: 0.21290077, 1: 0.32728332, 2: 0.45981591},\n    {0: 0.42860913, 1: 0.33380113, 2: 0.23758974},\n    {0: 0.44941979, 1: 0.32962558, 2: 0.22095463}\n]\n\nmetric = metrics.CrossEntropy()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n    print(metric.get())\n
    1.222454\n1.169691\n1.258864\n1.321597\n

    metric\n
    CrossEntropy: 1.321598\n

    "},{"location":"api/metrics/CrossEntropy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/F1/","title":"F1","text":"

    Binary F1 score.

    "},{"location":"api/metrics/F1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/F1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/F1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, False, False, True, True, True]\ny_pred = [False, False, True, True, False, False]\n\nmetric = metrics.F1()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    F1: 40.00%\n

    "},{"location":"api/metrics/F1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/FBeta/","title":"FBeta","text":"

    Binary F-Beta score.

    The FBeta score is a weighted harmonic mean between precision and recall. The higher the beta value, the more importance is given to recall. When beta equals 1, precision and recall are equivalently weighted, which results in the F1 score (see metrics.F1).
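
    Explicitly:

    \[ F_\beta = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\beta^2 \cdot precision + recall} \]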

    "},{"location":"api/metrics/FBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Weight of recall in the harmonic mean: recall is considered beta times as important as precision.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/FBeta/#attributes","title":"Attributes","text":"
    • precision (metrics.Precision)

    • recall (metrics.Recall)

    "},{"location":"api/metrics/FBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, False, False, True, True, True]\ny_pred = [False, False, True, True, False, False]\n\nmetric = metrics.FBeta(beta=2)\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    FBeta: 35.71%\n

    "},{"location":"api/metrics/FBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/FowlkesMallows/","title":"FowlkesMallows","text":"

    Fowlkes-Mallows Index.

    The Fowlkes-Mallows Index 1 2 is an external evaluation method that is used to determine the similarity between two clusterings, and also a metric to measure confusion matrices. The measure of similarity could be either between two hierarchical clusterings or a clustering and a benchmark classification. A higher value for the Fowlkes-Mallows index indicates a greater similarity between the clusters and the benchmark classifications.

    The Fowlkes-Mallows Index, for two cluster algorithms, is defined as:

    \\[ FM = \\sqrt{PPV \\times TPR} = \\sqrt{\\frac{TP}{TP+FP} \\times \\frac{TP}{TP+FN}} \\]

    where

    • TP, FP, FN are respectively the number of true positives, false positives and false negatives;

    • TPR is the True Positive Rate (or Sensitivity/Recall), PPV is the Positive Predictive Value (or Precision).

    "},{"location":"api/metrics/FowlkesMallows/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/FowlkesMallows/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/FowlkesMallows/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.FowlkesMallows()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    FowlkesMallows: 0.00%\nFowlkesMallows: 100.00%\nFowlkesMallows: 57.74%\nFowlkesMallows: 40.82%\nFowlkesMallows: 35.36%\nFowlkesMallows: 47.14%\n

    "},{"location":"api/metrics/FowlkesMallows/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2020, December 22). Fowlkes\u2013Mallows index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Fowlkes%E2%80%93Mallows_index&oldid=995714222\u00a0\u21a9

    2. E. B. Fowlkes and C. L. Mallows (1983). \u201cA method for comparing two hierarchical clusterings\u201d. Journal of the American Statistical Association\u00a0\u21a9

    "},{"location":"api/metrics/GeometricMean/","title":"GeometricMean","text":"

    Geometric mean score.

    The geometric mean is a good indicator of a classifier's performance in the presence of class imbalance because it is independent of the distribution of examples between classes. This implementation computes the geometric mean of class-wise sensitivity (recall).

    \\[ gm = \\sqrt[n]{s_1\\cdot s_2\\cdot s_3\\cdot \\ldots\\cdot s_n} \\]

    where \\(s_i\\) is the sensitivity (recall) of class \\(i\\) and \\(n\\) is the number of classes.

    "},{"location":"api/metrics/GeometricMean/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/GeometricMean/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/GeometricMean/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat', 'bird']\n\nmetric = metrics.GeometricMean()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    GeometricMean: 69.34%\n

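    This value can be verified against the formula above: the per-class recalls are 1 for ant (2/2), 1/2 for bird (1/2) and 2/3 for cat (2/3), and their geometric mean matches the reported score.

    round((1 * 1/2 * 2/3) ** (1/3), 4)\n

    0.6934\n
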
    "},{"location":"api/metrics/GeometricMean/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Barandela, R. et al. \u201cStrategies for learning in class imbalance problems\u201d, Pattern Recognition, 36(3), (2003), pp 849-851.\u00a0\u21a9

    "},{"location":"api/metrics/Homogeneity/","title":"Homogeneity","text":"

    Homogeneity Score.

    Homogeneity metric 1 of a cluster labeling given a ground truth.

    In order to satisfy the homogeneity criterion, a clustering must assign only those data points that are members of a single class to a single cluster. That is, the class distribution within each cluster should be skewed towards a single class, i.e. have zero entropy. We determine how close a given clustering is to this ideal by examining the conditional entropy of the class distribution given the proposed clustering.

    However, in an imperfect situation, the size of this value is dependent on the size of the dataset and the distribution of class sizes. Therefore, instead of taking the raw conditional entropy, we normalize by the maximum reduction in entropy the clustering information could provide.

    As such, we define homogeneity as:

    \\[ h = \\begin{cases} 1 & \\text{if } H(C) = 0, \\\\ 1 - \\frac{H(C|K)}{H(C)} & \\text{otherwise.} \\end{cases} \\]"},{"location":"api/metrics/Homogeneity/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Homogeneity/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/Homogeneity/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.Homogeneity()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.311278\n0.37515\n0.42062\n

    metric\n
    Homogeneity: 42.06%\n

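    The last value can be reproduced from the definition above (a hand check, assuming natural logarithms): the three true classes are balanced, and each of the two predicted clusters contains three points with the same 2:1 class mix.

    import math\n\nH_C = math.log(3)  # entropy of the three balanced true classes\nH_C_given_K = -(2/3 * math.log(2/3) + 1/3 * math.log(1/3))  # identical 2:1 class mix in both clusters\nround(1 - H_C_given_K / H_C, 5)\n

    0.42062\n
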
    "},{"location":"api/metrics/Homogeneity/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410-420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/Jaccard/","title":"Jaccard","text":"

    Jaccard score.

    "},{"location":"api/metrics/Jaccard/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Jaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/Jaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, True, True]\ny_pred = [True, True, True]\n\nmetric = metrics.Jaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Jaccard: 0.00%\nJaccard: 50.00%\nJaccard: 66.67%\n

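    The binary Jaccard score equals TP / (TP + FP + FN). After the three updates above there are two true positives, one false positive and no false negatives, which reproduces the final value.

    tp, fp, fn = 2, 1, 0\nround(tp / (tp + fp + fn), 4)\n

    0.6667\n
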
    "},{"location":"api/metrics/Jaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Jaccard index \u21a9

    "},{"location":"api/metrics/LogLoss/","title":"LogLoss","text":"

    Binary logarithmic loss.

    "},{"location":"api/metrics/LogLoss/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/LogLoss/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, False, True]\ny_pred = [0.9,  0.1,   0.2,   0.65]\n\nmetric = metrics.LogLoss()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n    print(metric.get())\n
    0.105360\n0.105360\n0.144621\n0.216161\n

    metric\n
    LogLoss: 0.216162\n

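    The reported value is the running mean of the per-sample negative log-likelihood of the true label, which can be checked by hand:

    import math\n\nlosses = [-math.log(p) for p in [0.9, 0.9, 0.8, 0.65]]  # probability assigned to the true label each round\nround(sum(losses) / len(losses), 6)\n

    0.216162\n
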
    "},{"location":"api/metrics/LogLoss/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MAE/","title":"MAE","text":"

    Mean absolute error.

    "},{"location":"api/metrics/MAE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MAE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MAE()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.5\n0.5\n0.333\n0.5\n

    metric\n
    MAE: 0.5\n

    "},{"location":"api/metrics/MAE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MAPE/","title":"MAPE","text":"

    Mean absolute percentage error.

    "},{"location":"api/metrics/MAPE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MAPE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MAPE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MAPE: 32.738095\n

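    The value is the mean of the absolute relative errors, expressed as a percentage, so it can be verified by hand:

    y_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nerrors = [abs((yt - yp) / yt) for yt, yp in zip(y_true, y_pred)]\nround(100 * sum(errors) / len(errors), 6)\n

    32.738095\n
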
    "},{"location":"api/metrics/MAPE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MCC/","title":"MCC","text":"

    Matthews correlation coefficient.

    "},{"location":"api/metrics/MCC/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/MCC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MCC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, True, True, False]\ny_pred = [True, False, True, True]\n\nmcc = metrics.MCC()\n\nfor yt, yp in zip(y_true, y_pred):\n    mcc = mcc.update(yt, yp)\n\nmcc\n
    MCC: -0.333333\n

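    The final value follows directly from the confusion counts accumulated above (TP = 2, TN = 0, FP = 1, FN = 1):

    import math\n\ntp, tn, fp, fn = 2, 0, 1, 1\nround((tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)), 6)\n

    -0.333333\n
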
    "},{"location":"api/metrics/MCC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia article \u21a9

    "},{"location":"api/metrics/MSE/","title":"MSE","text":"

    Mean squared error.

    "},{"location":"api/metrics/MSE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MSE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MSE()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.25\n0.25\n0.1666\n0.375\n

    "},{"location":"api/metrics/MSE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MacroF1/","title":"MacroF1","text":"

    Macro-average F1 score.

    This works by computing the F1 score per class and then averaging over the classes.

    "},{"location":"api/metrics/MacroF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MacroF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroF1()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroF1: 100.00%\nMacroF1: 33.33%\nMacroF1: 55.56%\nMacroF1: 55.56%\nMacroF1: 48.89%\n

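    As a hand check of the final value: after all five updates, the per-class F1 scores are 2/3 for class 0 (precision 1/2, recall 1), 0 for class 1 (never predicted correctly) and 4/5 for class 2 (precision 1, recall 2/3), and their unweighted average is the reported score.

    round((2/3 + 0 + 4/5) / 3, 4)\n

    0.4889\n
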
    "},{"location":"api/metrics/MacroF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroFBeta/","title":"MacroFBeta","text":"

    Macro-average F-Beta score.

    This works by computing the F-Beta score per class and then averaging over the classes.

    "},{"location":"api/metrics/MacroFBeta/#parameters","title":"Parameters","text":"
    • beta

      Weight of precision in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MacroFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroFBeta(beta=.8)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroFBeta: 100.00%\nMacroFBeta: 31.06%\nMacroFBeta: 54.04%\nMacroFBeta: 54.04%\nMacroFBeta: 48.60%\n

    "},{"location":"api/metrics/MacroFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroJaccard/","title":"MacroJaccard","text":"

    Macro-average Jaccard score.

    "},{"location":"api/metrics/MacroJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MacroJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroJaccard: 100.00%\nMacroJaccard: 25.00%\nMacroJaccard: 50.00%\nMacroJaccard: 50.00%\nMacroJaccard: 38.89%\n

    "},{"location":"api/metrics/MacroJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroPrecision/","title":"MacroPrecision","text":"

    Macro-average precision score.

    "},{"location":"api/metrics/MacroPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MacroPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroPrecision: 100.00%\nMacroPrecision: 25.00%\nMacroPrecision: 50.00%\nMacroPrecision: 50.00%\nMacroPrecision: 50.00%\n

    "},{"location":"api/metrics/MacroPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroRecall/","title":"MacroRecall","text":"

    Macro-average recall score.

    "},{"location":"api/metrics/MacroRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MacroRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroRecall: 100.00%\nMacroRecall: 50.00%\nMacroRecall: 66.67%\nMacroRecall: 66.67%\nMacroRecall: 55.56%\n

    "},{"location":"api/metrics/MacroRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MicroF1/","title":"MicroF1","text":"

    Micro-average F1 score.

    This works by merging all the predictions and true labels, and then computing a global F1 score.

    "},{"location":"api/metrics/MicroF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MicroF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 0]\ny_pred = [0, 1, 1, 2, 1]\n\nmetric = metrics.MicroF1()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MicroF1: 60.00%\n

    "},{"location":"api/metrics/MicroF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MicroFBeta/","title":"MicroFBeta","text":"

    Micro-average F-Beta score.

    This works by merging all the predictions and true labels, and then computing a global F-Beta score.

    "},{"location":"api/metrics/MicroFBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Weight of precision in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MicroFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 0]\ny_pred = [0, 1, 1, 2, 1]\n\nmetric = metrics.MicroFBeta(beta=2)\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MicroFBeta: 60.00%\n

    "},{"location":"api/metrics/MicroFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem?\u00a0\u21a9

    "},{"location":"api/metrics/MicroJaccard/","title":"MicroJaccard","text":"

    Micro-average Jaccard score.

    "},{"location":"api/metrics/MicroJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MicroJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroJaccard: 100.00%\nMicroJaccard: 33.33%\nMicroJaccard: 50.00%\nMicroJaccard: 60.00%\nMicroJaccard: 42.86%\n

    "},{"location":"api/metrics/MicroJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MicroPrecision/","title":"MicroPrecision","text":"

    Micro-average precision score.

    The micro-average precision score is exactly equivalent to the micro-average recall as well as the micro-average F1 score.

    "},{"location":"api/metrics/MicroPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MicroPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroPrecision: 100.00%\nMicroPrecision: 50.00%\nMicroPrecision: 66.67%\nMicroPrecision: 75.00%\nMicroPrecision: 60.00%\n

    "},{"location":"api/metrics/MicroPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MicroRecall/","title":"MicroRecall","text":"

    Micro-average recall score.

    The micro-average recall is exactly equivalent to the micro-average precision as well as the micro-average F1 score.

    "},{"location":"api/metrics/MicroRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MicroRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroRecall: 100.00%\nMicroRecall: 50.00%\nMicroRecall: 66.67%\nMicroRecall: 75.00%\nMicroRecall: 60.00%\n

    "},{"location":"api/metrics/MicroRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MultiFBeta/","title":"MultiFBeta","text":"

    Multi-class F-Beta score with different betas per class.

    The multiclass F-Beta score is the arithmetic average of the binary F-Beta scores of each class. The mean can be weighted by providing class weights.

    "},{"location":"api/metrics/MultiFBeta/#parameters","title":"Parameters","text":"
    • betas

      Weight of precision in the harmonic mean of each class.

    • weights

      Class weights. If not provided then uniform weights will be used.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MultiFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MultiFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MultiFBeta(\n    betas={0: 0.25, 1: 1, 2: 4},\n    weights={0: 1, 1: 1, 2: 2}\n)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MultiFBeta: 100.00%\nMultiFBeta: 25.76%\nMultiFBeta: 62.88%\nMultiFBeta: 62.88%\nMultiFBeta: 46.88%\n

    "},{"location":"api/metrics/MultiFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MutualInfo/","title":"MutualInfo","text":"

    Mutual Information between two clusterings.

    The Mutual Information 1 is a measure of the similarity between two labels of the same data. Where \(|U_i|\) is the number of samples in cluster \(U_i\) and \(|V_j|\) is the number of samples in cluster \(V_j\), the Mutual Information between clusterings \(U\) and \(V\) can be calculated as:

    \\[ MI(U,V) = \\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i \\cap V_j|}{N} \\log \\frac{N |U_i \\cap V_j|}{|U_i| |V_j|} \\]

    This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score.

    This metric is furthermore symmetric: switching y_true and y_pred will return the same score value. This can be useful to measure the agreement of two independent label assignment strategies on the same dataset when the real ground truth is not known.

    The Mutual Information can be equivalently expressed as:

    \\[ MI(U,V) = H(U) - H(U | V) = H(V) - H(V | U) \\]

    where \\(H(U)\\) and \\(H(V)\\) are the marginal entropies, \\(H(U | V)\\) and \\(H(V | U)\\) are the conditional entropies.

    "},{"location":"api/metrics/MutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/MutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.MutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.0\n0.0\n0.0\n0.215761\n0.395752\n0.462098\n

    metric\n
    MutualInfo: 0.462098\n

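    The final value can be checked against the entropy identity above (a hand check, assuming natural logarithms): \(H(U) = \log 3\) for the three balanced true classes, and \(H(U|V)\) is the entropy of the 2:1 class mix found in each predicted cluster.

    import math\n\nH_U = math.log(3)  # marginal entropy of the true classes\nH_U_given_V = -(2/3 * math.log(2/3) + 1/3 * math.log(1/3))  # conditional entropy within each cluster\nround(H_U - H_U_given_V, 6)\n

    0.462098\n
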
    "},{"location":"api/metrics/MutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/NormalizedMutualInfo/","title":"NormalizedMutualInfo","text":"

    Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalized version of the Mutual Information (MI) score that scales the result to a range between 0 (no mutual information) and 1 (perfect mutual information). In the formula, the mutual information is normalized by a generalized mean of the entropies of the true and predicted labels, as defined by the average_method parameter.

    We note that this measure is not adjusted for chance (i.e. it is not corrected for the agreement that would be expected solely by chance); as a result, the Adjusted Mutual Information score will mostly be preferred. However, this metric is still symmetric, which means that switching the true and predicted labels will not alter the score value. This fact can be useful when the metric is used to measure the agreement between two independent label assignments on the same dataset, when the ground truth remains unknown.

    Another advantage of the metric is that, being based on entropy-related measures, it is independent of any permutation of the class or cluster labels.

    "},{"location":"api/metrics/NormalizedMutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • average_method

      Default \u2192 arithmetic

      This parameter defines how to compute the normalizer in the denominator. Possible options include min, max, arithmetic and geometric.

    "},{"location":"api/metrics/NormalizedMutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/NormalizedMutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.NormalizedMutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.343711\n0.458065\n0.515803\n

    metric\n
    NormalizedMutualInfo: 0.515804\n

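    With the default arithmetic average_method, this is the mutual information of the previous example divided by the mean of the two marginal entropies (a hand check, assuming natural logarithms):

    import math\n\nmi = 0.462098  # final value of the MutualInfo example\nh_true = math.log(3)  # three balanced true classes\nh_pred = math.log(2)  # two balanced predicted clusters\nround(mi / ((h_true + h_pred) / 2), 4)\n

    0.5158\n
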
    "},{"location":"api/metrics/NormalizedMutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/Precision/","title":"Precision","text":"

    Binary precision score.

    "},{"location":"api/metrics/Precision/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Precision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/Precision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Precision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Precision: 100.00%\nPrecision: 50.00%\nPrecision: 50.00%\nPrecision: 66.67%\nPrecision: 75.00%\n

    "},{"location":"api/metrics/Precision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/R2/","title":"R2","text":"

    Coefficient of determination (\\(R^2\\)) score

    The coefficient of determination, denoted \\(R^2\\) or \\(r^2\\), is the proportion of the variance in the dependent variable that is predictable from the independent variable(s). 1

    The best possible score is 1.0, and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of \(y\), disregarding the input features, would get an \(R^2\) score of 0.0.

    \\(R^2\\) is not defined when less than 2 samples have been observed. This implementation returns 0.0 in this case.

    "},{"location":"api/metrics/R2/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/R2/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.R2()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.0\n0.9183\n0.9230\n0.9486\n

    "},{"location":"api/metrics/R2/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Coefficient of determination (Wikipedia) \u21a9

    "},{"location":"api/metrics/RMSE/","title":"RMSE","text":"

    Root mean squared error.

    "},{"location":"api/metrics/RMSE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/RMSE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.RMSE()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.5\n0.5\n0.408248\n0.612372\n

    metric\n
    RMSE: 0.612372\n

    "},{"location":"api/metrics/RMSE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/RMSLE/","title":"RMSLE","text":"

    Root mean squared logarithmic error.

    "},{"location":"api/metrics/RMSLE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/RMSLE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.RMSLE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    RMSLE: 0.357826\n

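    RMSLE is the root mean squared error computed on \(\log(1 + y)\), so the value above can be verified by hand:

    import math\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nsq = [(math.log1p(yt) - math.log1p(yp)) ** 2 for yt, yp in zip(y_true, y_pred)]\nround(math.sqrt(sum(sq) / len(sq)), 6)\n

    0.357826\n
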
    "},{"location":"api/metrics/RMSLE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/ROCAUC/","title":"ROCAUC","text":"

    Receiver Operating Characteristic Area Under the Curve.

    This metric is an approximation of the true ROC AUC. Computing the true ROC AUC would require storing all the predictions and ground truths, which isn't desirable. The approximation error is not significant as long as the predicted probabilities are well calibrated. In any case, this metric can still be used to reliably compare models with one another.

    "},{"location":"api/metrics/ROCAUC/#parameters","title":"Parameters","text":"
    • n_thresholds

      Default \u2192 10

      The number of thresholds used for discretizing the ROC curve. A higher value will lead to more accurate results, but will also cost more time and memory.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/ROCAUC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/ROCAUC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [ 0,  0,   1,  1]\ny_pred = [.1, .4, .35, .8]\n\nmetric = metrics.ROCAUC()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ROCAUC: 87.50%\n

    The true ROC AUC is in fact 0.75. We can improve the accuracy of the approximation by increasing the number of thresholds. This comes at the cost of more computation time and memory usage.

    metric = metrics.ROCAUC(n_thresholds=20)\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ROCAUC: 75.00%\n

    "},{"location":"api/metrics/ROCAUC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/Rand/","title":"Rand","text":"

    Rand Index.

    The Rand Index 1 2 is a measure of the similarity between two data clusterings. Given a set of elements S and two partitions of S to compare, X and Y, define the following:

    • a, the number of pairs of elements in S that are in the same subset in X and in the same subset in Y

    • b, the number of pairs of elements in S that are in different subsets in X and in different subsets in Y

    • c, the number of pairs of elements in S that are in the same subset in X and in different subsets in Y

    • d, the number of pairs of elements in S that are in different subsets in X and in the same subset in Y

    The Rand index, R, is

    \\[ R = \\frac{a+b}{a+b+c+d} = \\frac{a+b}{\\frac{n(n-1)}{2}}. \\]"},{"location":"api/metrics/Rand/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Rand/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/Rand/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.Rand()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    Rand: 0.666667\n

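    This score can be verified exhaustively: out of the 15 possible pairs of the 6 points, 10 are treated consistently by the two partitions (a = 2 pairs grouped together in both, b = 8 pairs separated in both), giving 10/15. A brute-force check:

    from itertools import combinations\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nagree = sum(\n    (y_true[i] == y_true[j]) == (y_pred[i] == y_pred[j])\n    for i, j in combinations(range(6), 2)\n)\nagree / 15\n

    0.6666666666666666\n
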
    "},{"location":"api/metrics/Rand/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, January 13). Rand index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Rand_index&oldid=1000098911\u00a0\u21a9

    2. W. M. Rand (1971). \"Objective criteria for the evaluation of clustering methods\". Journal of the American Statistical Association. American Statistical Association. 66 (336): 846\u2013850. arXiv:1704.01036. doi:10.2307/2284239. JSTOR 2284239.\u00a0\u21a9

    "},{"location":"api/metrics/Recall/","title":"Recall","text":"

    Binary recall score.

    "},{"location":"api/metrics/Recall/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Recall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes sample weights into account.

    "},{"location":"api/metrics/Recall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Recall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Recall: 100.00%\nRecall: 100.00%\nRecall: 50.00%\nRecall: 66.67%\nRecall: 75.00%\n

    "},{"location":"api/metrics/Recall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/RollingROCAUC/","title":"RollingROCAUC","text":"

    Rolling version of the Receiver Operating Characteristic Area Under the Curve.

    The RollingROCAUC calculates the metric using the instances in its window of size S. It keeps a queue of the instances; when a new instance arrives and the queue already holds S instances, the oldest one is removed. The instances are also kept in an ordered tree so that the AUC can be computed efficiently. The implementation is based on the algorithm presented in Brzezinski and Stefanowski, 2017.

    The difference between this metric and the standard ROCAUC is that the latter calculates an approximation of the real metric considering all data from the beginning of the stream, while the RollingROCAUC calculates the exact value considering only the last S instances. This approach may be beneficial if it's necessary to evaluate the model's performance over time, since calculating the metric using the entire stream may hide the current performance of the classifier.

    "},{"location":"api/metrics/RollingROCAUC/#parameters","title":"Parameters","text":"
    • window_size

      Default \u2192 1000

      The max length of the window.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/RollingROCAUC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/RollingROCAUC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [ 0,  1,  0,  1,  0,  1,  0,  0,   1,  1]\ny_pred = [.3, .5, .5, .7, .1, .3, .1, .4, .35, .8]\n\nmetric = metrics.RollingROCAUC(window_size=4)\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    RollingROCAUC: 75.00%\n

    "},{"location":"api/metrics/RollingROCAUC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/SMAPE/","title":"SMAPE","text":"

    Symmetric mean absolute percentage error.
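For reference, this corresponds to the usual convention (consistent with the example below):

\[ \text{SMAPE} = \frac{100}{n} \sum_{t=1}^{n} \frac{2 \times |y_t - \hat{y}_t|}{|y_t| + |\hat{y}_t|} \]

where a summand is taken to be 0 when both values are 0.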

    "},{"location":"api/metrics/SMAPE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/SMAPE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.0672, 0.0672]\ny_pred = [0, 0.102, 0.107, 0.047, 0.1, 0.032, 0.047, 0.108, 0.089]\n\nmetric = metrics.SMAPE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    SMAPE: 37.869392\n

    "},{"location":"api/metrics/SMAPE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/Silhouette/","title":"Silhouette","text":"

Silhouette coefficient 1, roughly speaking, is the ratio between cohesion and the average distance from the points to their second-closest centroid. It rewards clusterings in which points are very close to their assigned centroids and far from any other centroids, that is, clusterings with good cohesion and good separation. 2

The definition of the Silhouette coefficient for online clustering evaluation differs from the batch one. Pairwise distances between all points are not stored and computed at once, since doing so would be too expensive for an incremental metric.

    "},{"location":"api/metrics/Silhouette/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicates if a high value is better than a low one or not.

    "},{"location":"api/metrics/Silhouette/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\nfrom river import metrics\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [4, 2],\n    [4, 4],\n    [4, 0],\n    [-2, 2],\n    [-2, 4],\n    [-2, 0]\n]\n\nk_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)\nmetric = metrics.Silhouette()\n\nfor x, _ in stream.iter_array(X):\n    k_means = k_means.learn_one(x)\n    y_pred = k_means.predict_one(x)\n    metric = metric.update(x, y_pred, k_means.centers)\n\nmetric\n
    Silhouette: 0.568058\n

    "},{"location":"api/metrics/Silhouette/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    revert

    Revert the metric.

    Parameters

    • x
    • y_pred
    • centers
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • x
    • y_pred
    • centers
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

1. Rousseeuw, P. (1987). Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. Journal of Computational and Applied Mathematics 20, 53 - 65. DOI: 10.1016/0377-0427(87)90125-7\u00a0\u21a9

    2. Bifet, A. et al. (2018). \"Machine Learning for Data Streams\". DOI: 10.7551/mitpress/10654.001.0001.\u00a0\u21a9

    "},{"location":"api/metrics/VBeta/","title":"VBeta","text":"

    VBeta.

VBeta (or V-Measure) 1 is an external, entropy-based cluster evaluation measure. It provides an elegant solution to many problems that affect previously defined cluster evaluation measures, including:

• Dependence on the clustering algorithm or the dataset,

    • The \"problem of matching\", where the clustering of only a portion of data points are evaluated, and

    • Accurate evaluation and combination of two desirable aspects of clustering, homogeneity and completeness.
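In what follows, the homogeneity h and completeness c are the conditional-entropy-based quantities from the original paper: with C the ground-truth classes and K the predicted clusters,

\[ h = 1 - \frac{H(C \mid K)}{H(C)}, \qquad c = 1 - \frac{H(K \mid C)}{H(K)}. \]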

Based upon these two quantities, a clustering solution's V-measure is the weighted harmonic mean of homogeneity and completeness,

    \\[ V_{\\beta} = \\frac{(1 + \\beta) \\times h \\times c}{\\beta \\times h + c}. \\]"},{"location":"api/metrics/VBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Default \u2192 1.0

      Weight of Homogeneity in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/VBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/VBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.VBeta(beta=1.0)\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.3437110184854507\n0.4580652856440158\n0.5158037429793888\n

    metric\n
    VBeta: 51.58%\n

    "},{"location":"api/metrics/VBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410 - 420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/WeightedF1/","title":"WeightedF1","text":"

    Weighted-average F1 score.

This works by computing the F1 score per class, and then performing a global weighted average using the support of each class.

    "},{"location":"api/metrics/WeightedF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedF1()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedF1: 100.00%\nWeightedF1: 33.33%\nWeightedF1: 55.56%\nWeightedF1: 66.67%\nWeightedF1: 61.33%\n
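To see where the last value comes from: after the five samples, the per-class F1 scores are 2/3 for class 0, 0 for class 1 and 4/5 for class 2, with supports 1, 1 and 3 respectively. The weighted average is therefore

\[ \frac{1 \times \frac{2}{3} + 1 \times 0 + 3 \times \frac{4}{5}}{5} \approx 0.6133, \]

which is the 61.33% reported above.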

    "},{"location":"api/metrics/WeightedF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedFBeta/","title":"WeightedFBeta","text":"

    Weighted-average F-Beta score.

This works by computing the F-Beta score per class, and then performing a global weighted average according to the support of each class.
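For reference, each per-class score follows the standard F-beta definition, where p and r are that class's precision and recall:

\[ F_{\beta} = (1 + \beta^2) \times \frac{p \times r}{\beta^2 \times p + r} \]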

    "},{"location":"api/metrics/WeightedFBeta/#parameters","title":"Parameters","text":"
    • beta

      Weight of precision in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedFBeta(beta=0.8)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedFBeta: 100.00%\nWeightedFBeta: 31.06%\nWeightedFBeta: 54.04%\nWeightedFBeta: 65.53%\nWeightedFBeta: 62.63%\n

    "},{"location":"api/metrics/WeightedFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedJaccard/","title":"WeightedJaccard","text":"

    Weighted average Jaccard score.

    "},{"location":"api/metrics/WeightedJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedJaccard: 100.00%\nWeightedJaccard: 25.00%\nWeightedJaccard: 50.00%\nWeightedJaccard: 62.50%\nWeightedJaccard: 50.00%\n

    "},{"location":"api/metrics/WeightedJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedPrecision/","title":"WeightedPrecision","text":"

    Weighted-average precision score.

    This uses the support of each label to compute an average score, whereas metrics.MacroPrecision ignores the support.

    "},{"location":"api/metrics/WeightedPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedPrecision: 100.00%\nWeightedPrecision: 25.00%\nWeightedPrecision: 50.00%\nWeightedPrecision: 62.50%\nWeightedPrecision: 70.00%\n

    "},{"location":"api/metrics/WeightedPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedRecall/","title":"WeightedRecall","text":"

    Weighted-average recall score.

    This uses the support of each label to compute an average score, whereas MacroRecall ignores the support.

    "},{"location":"api/metrics/WeightedRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedRecall: 100.00%\nWeightedRecall: 50.00%\nWeightedRecall: 66.67%\nWeightedRecall: 75.00%\nWeightedRecall: 60.00%\n

    "},{"location":"api/metrics/WeightedRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/BinaryMetric/","title":"BinaryMetric","text":"

    Mother class for all binary classification metrics.

    "},{"location":"api/metrics/base/BinaryMetric/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/base/BinaryMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/BinaryMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/ClassificationMetric/","title":"ClassificationMetric","text":"

    Mother class for all classification metrics.

    "},{"location":"api/metrics/base/ClassificationMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/base/ClassificationMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/ClassificationMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/Metric/","title":"Metric","text":"

    Mother class for all metrics.

    "},{"location":"api/metrics/base/Metric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/Metric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/Metrics/","title":"Metrics","text":"

    A container class for handling multiple metrics at once.
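A minimal sketch, assuming the container is built from a plain list of metrics (the metrics parameter below) and updated like any single metric:

from river import metrics

# Two classification metrics tracked together
metric = metrics.base.Metrics([metrics.Accuracy(), metrics.MacroF1()])

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]

for yt, yp in zip(y_true, y_pred):
    metric = metric.update(yt, yp)

metric  # each constituent metric is displayed, separated by str_sep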

    "},{"location":"api/metrics/base/Metrics/#parameters","title":"Parameters","text":"
    • metrics

    • str_sep

      Default \u2192 ,

    "},{"location":"api/metrics/base/Metrics/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/Metrics/#methods","title":"Methods","text":"is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/MultiClassMetric/","title":"MultiClassMetric","text":"

    Mother class for all multi-class classification metrics.

    "},{"location":"api/metrics/base/MultiClassMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/base/MultiClassMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/MultiClassMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/RegressionMetric/","title":"RegressionMetric","text":"

    Mother class for all regression metrics.

    "},{"location":"api/metrics/base/RegressionMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/RegressionMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/WrapperMetric/","title":"WrapperMetric","text":""},{"location":"api/metrics/base/WrapperMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/WrapperMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/ExactMatch/","title":"ExactMatch","text":"

    Exact match score.

    This is the most strict multi-label metric, defined as the number of samples that have all their labels correctly classified, divided by the total number of samples.

    "},{"location":"api/metrics/multioutput/ExactMatch/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/ExactMatch/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False},\n    {0: True, 1: True, 2: False},\n]\n\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False},\n    {0: True, 1: True, 2: False},\n]\n\nmetric = metrics.multioutput.ExactMatch()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ExactMatch: 33.33%\n

    "},{"location":"api/metrics/multioutput/ExactMatch/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/multioutput/MacroAverage/","title":"MacroAverage","text":"

    Macro-average wrapper.

    A copy of the provided metric is made for each output. The arithmetic average of all the metrics is returned.
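A minimal sketch with a regression metric, assuming dictionary outputs keyed by output name:

from river import metrics

metric = metrics.multioutput.MacroAverage(metrics.MAE())

y_true = {'y1': 1.0, 'y2': 2.0}
y_pred = {'y1': 1.5, 'y2': 1.0}

metric = metric.update(y_true, y_pred)

# One MAE per output: 0.5 for y1 and 1.0 for y2, so the macro-average should be 0.75
metric.get()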

    "},{"location":"api/metrics/multioutput/MacroAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/MacroAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/MacroAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/MicroAverage/","title":"MicroAverage","text":"

    Micro-average wrapper.

    The provided metric is updated with the value of each output.

    "},{"location":"api/metrics/multioutput/MicroAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/MicroAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/MicroAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/","title":"MultiLabelConfusionMatrix","text":"

    Multi-label confusion matrix.

    Under the hood, this stores one metrics.ConfusionMatrix for each output.

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/#examples","title":"Examples","text":"

    from river import metrics\n\ncm = metrics.multioutput.MultiLabelConfusionMatrix()\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False}\n]\n\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False}\n]\n\nfor yt, yp in zip(y_true, y_pred):\n    cm = cm.update(yt, yp)\n\ncm\n
    0\n            False   True\n    False       0      1\n     True       0      1\n<BLANKLINE>\n1\n            False   True\n    False       0      0\n     True       1      1\n<BLANKLINE>\n2\n            False   True\n    False       1      0\n     True       0      1\n

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/#methods","title":"Methods","text":"revert update"},{"location":"api/metrics/multioutput/PerOutput/","title":"PerOutput","text":"

    Per-output wrapper.

    A copy of the metric is maintained for each output.

    "},{"location":"api/metrics/multioutput/PerOutput/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/PerOutput/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/PerOutput/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/SampleAverage/","title":"SampleAverage","text":"

    Sample-average wrapper.

The provided metric is evaluated on each sample. The arithmetic average over all the samples is returned. This is equivalent to using average='samples' in scikit-learn.

    "},{"location":"api/metrics/multioutput/SampleAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/SampleAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/SampleAverage/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False}\n]\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False}\n]\n\nsample_jaccard = metrics.multioutput.SampleAverage(metrics.Jaccard())\n\nfor yt, yp in zip(y_true, y_pred):\n    sample_jaccard = sample_jaccard.update(yt, yp)\nsample_jaccard\n
    SampleAverage(Jaccard): 58.33%\n

    "},{"location":"api/metrics/multioutput/SampleAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/","title":"MultiOutputClassificationMetric","text":"

    Mother class for all multi-output classification metrics.

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 MultiLabelConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/","title":"MultiOutputRegressionMetric","text":"

    Mother class for all multi-output regression metrics.

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, float | int]'
    • y_pred \u2014 'dict[str | int, float | int]'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, float | int]'
    • y_pred \u2014 'dict[str | int, float | int]'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/misc/SDFT/","title":"SDFT","text":"

    Sliding Discrete Fourier Transform (SDFT).

    Initially, the coefficients are all equal to 0, up until enough values have been seen. A call to numpy.fft.fft is triggered once window_size values have been seen. Subsequent values will update the coefficients online. This is much faster than recomputing an FFT from scratch for every new value.
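The update itself is the classic sliding DFT recurrence from Jacobsen and Lyons 1: once the window is full, each coefficient is corrected for the incoming and outgoing samples and rotated by one bin,

\[ S_k(n) = \left( S_k(n - 1) + x(n) - x(n - N) \right) e^{j 2 \pi k / N}, \]

where N is the window size and k indexes the coefficients.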

    "},{"location":"api/misc/SDFT/#parameters","title":"Parameters","text":"
    • window_size

      The size of the window.

    "},{"location":"api/misc/SDFT/#attributes","title":"Attributes","text":"
    • window_size
    "},{"location":"api/misc/SDFT/#examples","title":"Examples","text":"
    import numpy as np\nfrom river import misc\n\nX = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nwindow_size = 5\nsdft = misc.SDFT(window_size)\n\nfor i, x in enumerate(X):\n    sdft = sdft.update(x)\n\n    if i + 1 >= window_size:\n        assert np.allclose(sdft.coefficients, np.fft.fft(X[i+1 - window_size:i+1]))\n
    "},{"location":"api/misc/SDFT/#methods","title":"Methods","text":"update
    1. Jacobsen, E. and Lyons, R., 2003. The sliding DFT. IEEE Signal Processing Magazine, 20(2), pp.74-80. \u21a9

    2. Understanding and Implementing the Sliding DFT \u21a9

    "},{"location":"api/misc/Skyline/","title":"Skyline","text":"

A skyline is the set of points which are not dominated by any other point.

    This implementation uses a block nested loop. Identical observations are all part of the skyline if applicable.
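As a rough sketch of the dominance test at the heart of the block nested loop (the function below is illustrative, not River's internals):

def dominates(a, b, minimize, maximize):
    """Return True if point `a` dominates point `b`."""
    # a must be at least as good as b on every criterion...
    at_least_as_good = (
        all(a[f] <= b[f] for f in minimize)
        and all(a[f] >= b[f] for f in maximize)
    )
    # ...and strictly better on at least one criterion
    strictly_better = (
        any(a[f] < b[f] for f in minimize)
        or any(a[f] > b[f] for f in maximize)
    )
    return at_least_as_good and strictly_better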

    "},{"location":"api/misc/Skyline/#parameters","title":"Parameters","text":"
    • minimize

      Type \u2192 list | None

      Default \u2192 None

      A list of features for which the values need to be minimized. Can be omitted as long as maximize is specified.

    • maximize

      Type \u2192 list | None

      Default \u2192 None

      A list of features for which the values need to be maximized. Can be omitted as long as minimize is specified.

    "},{"location":"api/misc/Skyline/#examples","title":"Examples","text":"

Here is an example taken from a blog post on skyline queries 1.

    import random\nfrom river import misc\n\ncity_prices = {\n    'Bordeaux': 4045,\n    'Lyon': 4547,\n    'Toulouse': 3278\n}\n\ndef random_house():\n    city = random.choice(['Bordeaux', 'Lyon', 'Toulouse'])\n    size = round(random.gauss(200, 50))\n    price = round(random.uniform(0.8, 1.2) * city_prices[city] * size)\n    return {'city': city, 'size': size, 'price': price}\n\nskyline = misc.Skyline(minimize=['price'], maximize=['size'])\n\nrandom.seed(42)\n\nfor _ in range(100):\n    house = random_house()\n    skyline = skyline.update(house)\n\nprint(len(skyline))\n
    13\n

    print(skyline[0])\n
    {'city': 'Toulouse', 'size': 280, 'price': 763202}\n

    Here is another example using the kart data from Mario Kart: Double Dash!!.

    import collections\nfrom river import misc\n\nKart = collections.namedtuple(\n     'Kart',\n     'name speed off_road acceleration weight turbo'\n)\n\nkarts = [\n    Kart('Red Fire', 5, 4, 4, 5, 2),\n    Kart('Green Fire', 7, 3, 3, 4, 2),\n    Kart('Heart Coach', 4, 6, 6, 5, 2),\n    Kart('Bloom Coach', 6, 4, 5, 3, 2),\n    Kart('Turbo Yoshi', 4, 5, 6, 6, 2),\n    Kart('Turbo Birdo', 6, 4, 4, 7, 2),\n    Kart('Goo-Goo Buggy', 1, 9, 9, 2, 3),\n    Kart('Rattle Buggy', 2, 9, 8, 2, 3),\n    Kart('Toad Kart', 3, 9, 7, 2, 3),\n    Kart('Toadette Kart', 1, 9, 9, 2, 3),\n    Kart('Koopa Dasher', 2, 8, 8, 3, 3),\n    Kart('Para-Wing', 1, 8, 9, 3, 3),\n    Kart('DK Jumbo', 8, 2, 2, 8, 1),\n    Kart('Barrel Train', 8, 7, 3, 5, 3),\n    Kart('Koopa King', 9, 1, 1, 9, 1),\n    Kart('Bullet Blaster', 8, 1, 4, 1, 3),\n    Kart('Wario Car', 7, 3, 3, 7, 1),\n    Kart('Waluigi Racer', 5, 9, 5, 6, 2),\n    Kart('Piranha Pipes', 8, 7, 2, 9, 1),\n    Kart('Boo Pipes', 2, 9, 8, 9, 1),\n    Kart('Parade Kart', 7, 3, 4, 7, 3)\n]\n\nskyline = misc.Skyline(\n    maximize=['speed', 'off_road', 'acceleration', 'turbo'],\n    minimize=['weight']\n)\n\nfor kart in karts:\n    skyline = skyline.update(kart._asdict())\n\nbest_cart_names = [kart['name'] for kart in skyline]\nfor name in best_cart_names:\n    print(f'- {name}')\n
    - Green Fire\n- Heart Coach\n- Bloom Coach\n- Goo-Goo Buggy\n- Rattle Buggy\n- Toad Kart\n- Toadette Kart\n- Barrel Train\n- Koopa King\n- Bullet Blaster\n- Waluigi Racer\n- Parade Kart\n

    for name in sorted(set(kart.name for kart in karts) - set(best_cart_names)):\n    print(f'- {name}')\n
    - Boo Pipes\n- DK Jumbo\n- Koopa Dasher\n- Para-Wing\n- Piranha Pipes\n- Red Fire\n- Turbo Birdo\n- Turbo Yoshi\n- Wario Car\n

    "},{"location":"api/misc/Skyline/#methods","title":"Methods","text":"
    1. Skyline queries in Python \u21a9

    2. Borzsony, S., Kossmann, D. and Stocker, K., 2001, April. The skyline operator. In Proceedings 17th international conference on data engineering (pp. 421-430). IEEE. \u21a9

    3. Tao, Y. and Papadias, D., 2006. Maintaining sliding window skylines on data streams. IEEE Transactions on Knowledge and Data Engineering, 18(3), pp.377-391. \u21a9

    "},{"location":"api/model-selection/BanditClassifier/","title":"BanditClassifier","text":"

    Bandit-based model selection for classification.

    Each model is associated with an arm. At each learn_one call, the policy decides which arm/model to pull. The reward is the performance of the model on the provided sample. The predict_one and predict_proba_one methods use the current best model.

    "},{"location":"api/model-selection/BanditClassifier/#parameters","title":"Parameters","text":"
    • models

      The models to select from.

    • metric

      Type \u2192 metrics.base.ClassificationMetric

      The metric that is used to measure the performance of each model.

    • policy

      Type \u2192 bandit.base.Policy

      The bandit policy to use.

    "},{"location":"api/model-selection/BanditClassifier/#attributes","title":"Attributes","text":"
    • best_model

    • models

    "},{"location":"api/model-selection/BanditClassifier/#examples","title":"Examples","text":"

    from river import bandit\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LogisticRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\ndataset = datasets.Phishing()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.BanditClassifier(\n        models,\n        metric=metrics.Accuracy(),\n        policy=bandit.EpsilonGreedy(\n            epsilon=0.1,\n            decay=0.001,\n            burn_in=20,\n            seed=42\n        )\n    )\n)\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.96%\n

    "},{"location":"api/model-selection/BanditClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    "},{"location":"api/model-selection/BanditRegressor/","title":"BanditRegressor","text":"

    Bandit-based model selection for regression.

    Each model is associated with an arm. At each learn_one call, the policy decides which arm/model to pull. The reward is the performance of the model on the provided sample. The predict_one method uses the current best model.

    "},{"location":"api/model-selection/BanditRegressor/#parameters","title":"Parameters","text":"
    • models

      The models to select from.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      The metric that is used to measure the performance of each model.

    • policy

      Type \u2192 bandit.base.Policy

      The bandit policy to use.

    "},{"location":"api/model-selection/BanditRegressor/#attributes","title":"Attributes","text":"
    • best_model

    • models

    "},{"location":"api/model-selection/BanditRegressor/#examples","title":"Examples","text":"

    from river import bandit\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.BanditRegressor(\n        models,\n        metric=metrics.MAE(),\n        policy=bandit.EpsilonGreedy(\n            epsilon=0.1,\n            decay=0.001,\n            burn_in=100,\n            seed=42\n        )\n    )\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 3.134089\n

    Here's another example using the UCB policy. The latter is more sensitive to the target scale, and usually works better when the target is rescaled.

    models = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetStandardScaler(\n        model_selection.BanditRegressor(\n            models,\n            metric=metrics.MAE(),\n            policy=bandit.UCB(\n                delta=1,\n                burn_in=100\n            )\n        )\n    )\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.875333\n

    "},{"location":"api/model-selection/BanditRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/model-selection/GreedyRegressor/","title":"GreedyRegressor","text":"

    Greedy selection regressor.

This selection method simply updates each model at each time step. The current best model is used to make predictions. It's greedy in the sense that updating every model can be costly. Bandit-like algorithms, on the other hand, are more frugal, in that they only update a subset of the models at each step.

    "},{"location":"api/model-selection/GreedyRegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Regressor]

      The models to select from.

    • metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      The metric that is used to measure the performance of each model.

    "},{"location":"api/model-selection/GreedyRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/GreedyRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [1e-5, 1e-4, 1e-3, 1e-2]\n]\n\ndataset = datasets.TrumpApproval()\nmetric = metrics.MAE()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.GreedyRegressor(models, metric)\n)\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.319678\n

    "},{"location":"api/model-selection/GreedyRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/","title":"SuccessiveHalvingClassifier","text":"

    Successive halving algorithm for classification.

Successive halving is a method for performing model selection without having to train each model on the whole dataset. At certain points in time (called \"rungs\"), the worst performing models are discarded and the best ones keep competing with each other. The rung values are designed so that at most budget model updates will be performed in total.

If you have k combinations of hyperparameters and your dataset contains n observations, then the maximal budget you can allocate is:

    \\[\\frac{2kn}{eta}\\]

It is recommended that you check this beforehand. This bound can't be checked by the function because the size of the dataset is not known in advance. In fact, the dataset is potentially infinite, in which case the algorithm will terminate once all the budget has been spent.
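For instance, with k = 10 models, n = 1250 observations and eta = 2, the bound works out to

\[ \frac{2kn}{\eta} = \frac{2 \times 10 \times 1250}{2} = 12500 \]

model updates, so a larger budget than that could never be fully spent on this dataset.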

If you have a budget of B and your dataset contains n observations, then the number of hyperparameter combinations that will spend all the budget and go through all the data is:

    \\[\\left\\lceil\\left\\lfloor\\frac{B}{2n}\\right\\rfloor \\times eta \\right\\rceil\\]"},{"location":"api/model-selection/SuccessiveHalvingClassifier/#parameters","title":"Parameters","text":"
    • models

      The models to compare.

    • metric

      Type \u2192 metrics.base.Metric

      Metric used for comparing models with.

    • budget

      Type \u2192 int

      Total number of model updates you wish to allocate.

    • eta

      Default \u2192 2

  Rate of elimination. At every rung, math.ceil(k / eta) models are kept, where k is the number of models that have reached the rung. A higher eta value will focus on fewer models but will allocate more iterations to the best models.

    • verbose

      Default \u2192 False

      Whether to display progress or not.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#examples","title":"Examples","text":"

    As an example, let's use successive halving to tune the optimizer of a logistic regression. We'll first define the model.

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    Let's now define a grid of parameters which we would like to compare. We'll try different optimizers with various learning rates.

    from river import utils\nfrom river import optim\n\nmodels = utils.expand_param_grid(model, {\n    'LogisticRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01, .005]}),\n            (optim.Adam, {'beta_1': [.01, .001], 'lr': [.1, .01, .001]}),\n            (optim.Adam, {'beta_1': [.1], 'lr': [.001]}),\n        ]\n    }\n})\n

    We can check how many models we've created.

    len(models)\n
    10\n

    We can now pass these models to a SuccessiveHalvingClassifier. We also need to pick a metric to compare the models, and a budget which indicates how many iterations to run before picking the best model and discarding the rest.

from river import metrics\nfrom river import model_selection\n\nsh = model_selection.SuccessiveHalvingClassifier(\n    models,\n    metric=metrics.Accuracy(),\n    budget=2000,\n    eta=2,\n    verbose=True\n)\n

    A SuccessiveHalvingClassifier is also a classifier with a learn_one and a predict_proba_one method. We can therefore evaluate it like any other classifier with evaluate.progressive_val_score.

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    dataset=datasets.Phishing(),\n    model=sh,\n    metric=metrics.ROCAUC()\n)\n
    [1] 5 removed       5 left  50 iterations   budget used: 500        budget left: 1500       best Accuracy: 80.00%\n[2] 2 removed       3 left  100 iterations  budget used: 1000       budget left: 1000       best Accuracy: 84.00%\n[3] 1 removed       2 left  166 iterations  budget used: 1498       budget left: 502        best Accuracy: 86.14%\n[4] 1 removed       1 left  250 iterations  budget used: 1998       budget left: 2  best Accuracy: 84.80%\nROCAUC: 95.22%\n

    We can now view the best model.

    sh.best_model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LogisticRegression (\n    optimizer=Adam (\n      lr=Constant (\n        learning_rate=0.01\n      )\n      beta_1=0.01\n      beta_2=0.999\n      eps=1e-08\n    )\n    loss=Log (\n      weight_pos=1.\n      weight_neg=1.\n    )\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Jamieson, K. and Talwalkar, A., 2016, May. Non-stochastic best arm identification and hyperparameter optimization. In Artificial Intelligence and Statistics (pp. 240-248). \u21a9

    2. Li, L., Jamieson, K., Rostamizadeh, A., Gonina, E., Hardt, M., Recht, B. and Talwalkar, A., 2018. Massively parallel hyperparameter tuning. arXiv preprint arXiv:1810.05934. \u21a9

    3. Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A., 2017. Hyperband: A novel bandit-based approach to hyperparameter optimization. The Journal of Machine Learning Research, 18(1), pp.6765-6816. \u21a9

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/","title":"SuccessiveHalvingRegressor","text":"

    Successive halving algorithm for regression.

    Successive halving is a method for performing model selection without having to train each model on all of the dataset. At certain points in time (called \"rungs\"), the worst performing models will be discarded and the best ones will keep competing with each other. The rung values are designed so that at most budget model updates will be performed in total.

    If you have k combinations of hyperparameters and your dataset contains n observations, then the maximal budget you can allocate is:

    \\[\\frac{2kn}{eta}\\]

    It is recommended that you check this beforehand. This bound can't be checked by the function because the size of the dataset is not known. In fact, it is potentially infinite, in which case the algorithm will terminate once all the budget has been spent.
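
    For instance (a quick back-of-the-envelope check with illustrative numbers), k = 10 hyperparameter combinations, n = 1000 observations, and eta = 2 give a maximal budget of:

    k, n, eta = 10, 1000, 2\n2 * k * n / eta\n
    10000.0\n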

    If you have a budget of B and your dataset contains n observations, then the number of hyperparameter combinations that will spend all the budget and go through all the data is:

    \\[\\left\\lceil\\left\\lfloor\\frac{B}{2n}\\right\\rfloor \\times eta \\right\\rceil\\]"},{"location":"api/model-selection/SuccessiveHalvingRegressor/#parameters","title":"Parameters","text":"
    • models

      The models to compare.

    • metric

      Type \u2192 metrics.base.Metric

      Metric used for comparing models with.

    • budget

      Type \u2192 int

      Total number of model updates you wish to allocate.

    • eta

      Default \u2192 2

      Rate of elimination. At every rung, math.ceil(k / eta) models are kept, where k is the number of models that have reached the rung. A higher eta value will focus on fewer models but will allocate more iterations to the best ones.

    • verbose

      Default \u2192 False

      Whether to display progress or not.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#examples","title":"Examples","text":"

    As an example, let's use successive halving to tune the optimizer of a linear regression. We'll first define the model.

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(intercept_lr=.1)\n)\n

    Let's now define a grid of parameters which we would like to compare. We'll try different optimizers with various learning rates.

    from river import optim\nfrom river import utils\n\nmodels = utils.expand_param_grid(model, {\n    'LinearRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01, .005]}),\n            (optim.Adam, {'beta_1': [.01, .001], 'lr': [.1, .01, .001]}),\n            (optim.Adam, {'beta_1': [.1], 'lr': [.001]}),\n        ]\n    }\n})\n

    We can check how many models we've created.

    len(models)\n
    10\n

    We can now pass these models to a SuccessiveHalvingRegressor. We also need to pick a metric to compare the models, and a budget which indicates how many iterations to run before picking the best model and discarding the rest.

    from river import metrics\nfrom river import model_selection\n\nsh = model_selection.SuccessiveHalvingRegressor(\n    models,\n    metric=metrics.MAE(),\n    budget=2000,\n    eta=2,\n    verbose=True\n)\n

    A SuccessiveHalvingRegressor is also a regressor with a learn_one and a predict_one method. We can therefore evaluate it like any other regressor with evaluate.progressive_val_score.

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    dataset=datasets.TrumpApproval(),\n    model=sh,\n    metric=metrics.MAE()\n)\n
    [1] 5 removed       5 left  50 iterations   budget used: 500        budget left: 1500       best MAE: 4.419643\n[2] 2 removed       3 left  100 iterations  budget used: 1000       budget left: 1000       best MAE: 2.392266\n[3] 1 removed       2 left  166 iterations  budget used: 1498       budget left: 502        best MAE: 1.541383\n[4] 1 removed       1 left  250 iterations  budget used: 1998       budget left: 2  best MAE: 1.112122\nMAE: 0.490688\n

    We can now view the best model.

    sh.best_model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=Adam (\n      lr=Constant (\n        learning_rate=0.1\n      )\n      beta_1=0.01\n      beta_2=0.999\n      eps=1e-08\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.1\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Jamieson, K. and Talwalkar, A., 2016, May. Non-stochastic best arm identification and hyperparameter optimization. In Artificial Intelligence and Statistics (pp. 240-248). \u21a9

    2. Li, L., Jamieson, K., Rostamizadeh, A., Gonina, E., Hardt, M., Recht, B. and Talwalkar, A., 2018. Massively parallel hyperparameter tuning. arXiv preprint arXiv:1810.05934. \u21a9

    3. Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A., 2017. Hyperband: A novel bandit-based approach to hyperparameter optimization. The Journal of Machine Learning Research, 18(1), pp.6765-6816. \u21a9

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/","title":"ModelSelectionClassifier","text":"

    A model selector for classification.

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[base.Estimator]

    • metric

      Type \u2192 metrics.base.Metric

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/","title":"ModelSelectionRegressor","text":"

    A model selector for regression.

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[base.Estimator]

    • metric

      Type \u2192 metrics.base.Metric

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/multiclass/OneVsOneClassifier/","title":"OneVsOneClassifier","text":"

    One-vs-One (OvO) multiclass strategy.

    This strategy consists in fitting one binary classifier for each pair of classes. Because we are in a streaming context, the number of classes isn't known from the start, hence new classifiers are instantiated on the fly.

    The number of classifiers is k * (k - 1) / 2, where k is the number of classes. However, each call to learn_one only requires training k - 1 models. Indeed, only the models that pertain to the given label have to be trained. Meanwhile, making a prediction requires going through each and every model.
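
    As a quick illustration of these counts (plain Python with a hypothetical set of classes), four classes yield 4 * (4 - 1) / 2 = 6 pairwise classifiers, and a sample labelled 'a' only concerns the 3 pairs that contain 'a':

    import itertools\n\npairs = list(itertools.combinations(sorted(['a', 'b', 'c', 'd']), 2))\nprint(len(pairs), sum('a' in pair for pair in pairs))\n
    6 3\n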

    "},{"location":"api/multiclass/OneVsOneClassifier/#parameters","title":"Parameters","text":"
    • classifier

      A binary classifier, although a multi-class classifier will work too.

    "},{"location":"api/multiclass/OneVsOneClassifier/#attributes","title":"Attributes","text":"
    • classifiers (dict)

      A mapping between pairs of classes and classifiers. The keys are tuples which contain a pair of classes. Each pair is sorted in lexicographical order.

    "},{"location":"api/multiclass/OneVsOneClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\novo = multiclass.OneVsOneClassifier(linear_model.LogisticRegression())\nmodel = scaler | ovo\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 80.76%\n

    "},{"location":"api/multiclass/OneVsOneClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/multiclass/OneVsRestClassifier/","title":"OneVsRestClassifier","text":"

    One-vs-the-rest (OvR) multiclass strategy.

    This strategy consists in fitting one binary classifier per class. Because we are in a streaming context, the number of classes isn't known from the start. Hence, new classifiers are instantiated on the fly. Likewise, the predicted probabilities will only include the classes seen up to a given point in time.

    Note that this classifier supports mini-batches as well as single instances.

    The computational complexity for both learning and predicting grows linearly with the number of classes. If you have a very large number of classes, then you might want to consider using a multiclass.OutputCodeClassifier instead.

    "},{"location":"api/multiclass/OneVsRestClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      A binary classifier, although a multi-class classifier will work too.

    "},{"location":"api/multiclass/OneVsRestClassifier/#attributes","title":"Attributes","text":"
    • classifiers (dict)

      A mapping between classes and classifiers.

    "},{"location":"api/multiclass/OneVsRestClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\novr = multiclass.OneVsRestClassifier(linear_model.LogisticRegression())\nmodel = scaler | ovr\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 77.46%\n

    This estimator also supports mini-batching.

    import pandas as pd\n\nfor X in pd.read_csv(dataset.path, chunksize=64):\n    y = X.pop('category')\n    y_pred = model.predict_many(X)\n    model = model.learn_many(X, y)\n
    "},{"location":"api/multiclass/OneVsRestClassifier/#methods","title":"Methods","text":"learn_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_many predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/multiclass/OutputCodeClassifier/","title":"OutputCodeClassifier","text":"

    Output-code multiclass strategy.

    This is also referred to as \"error-correcting output codes\".

    This class makes it possible to learn a multi-class classification problem with a binary classifier. Each class is converted to a code of 0s and 1s. The length of the code is called the code size. A copy of the classifier is made for each bit of the code. The codes associated with the classes are stored in a code book.

    When a new sample arrives, the label's code is retrieved from the code book. Then, each classifier is trained on the relevant bit of the code, which is either a 0 or a 1.

    For predicting, each classifier outputs a probability. These are then compared to each code in the code book, and the label which is the \"closest\" is chosen as the most likely class. Closeness is determined in terms of Manhattan distance.

    One specificity of online learning is that we don't know how many classes there are initially. Therefore, a random procedure generates codes on the fly whenever a previously unseen label appears.
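
    Here is a minimal sketch of the decoding step (plain Python; the code book and probabilities are made up for illustration and are not River's internals):

    code_book = {'cat': (0, 1, 1), 'dog': (1, 0, 1), 'bird': (1, 1, 0)}\nprobas = (0.9, 0.2, 0.8)  # one probability per classifier\n\ndef manhattan(code):\n    return sum(abs(c - p) for c, p in zip(code, probas))\n\nmin(code_book, key=lambda label: manhattan(code_book[label]))\n
    'dog'\n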

    "},{"location":"api/multiclass/OutputCodeClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      A binary classifier, although a multi-class classifier will work too.

    • code_size

      Type \u2192 int

      The code size, which dictates how many copies of the provided classifiers to train. Must be strictly positive.

    • coding_method

      Type \u2192 str

      Default \u2192 random

      The method used to generate the codes. Can be either 'exact' or 'random'. The 'exact' method generates all possible codes of a given size in memory, and streams them in a random order. The 'random' method generates random codes of a given size on the fly. The 'exact' method necessarily generates different codes for each class, but requires more memory. The 'random' method can generate duplicate codes for different classes, but requires less memory.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      A random seed number that can be set for reproducibility.

    "},{"location":"api/multiclass/OutputCodeClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\nooc = multiclass.OutputCodeClassifier(\n    classifier=linear_model.LogisticRegression(),\n    code_size=10,\n    coding_method='random',\n    seed=1\n)\nmodel = scaler | ooc\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 79.58%\n

    "},{"location":"api/multiclass/OutputCodeClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Dietterich, T.G. and Bakiri, G., 1994. Solving multiclass learning problems via error-correcting output codes. Journal of artificial intelligence research, 2, pp.263-286. \u21a9

    2. James, G. and Hastie, T., 1998. The error coding method and PICTs. Journal of Computational and Graphical statistics, 7(3), pp.377-387. \u21a9

    "},{"location":"api/multioutput/ClassifierChain/","title":"ClassifierChain","text":"

    A multi-output model that arranges classifiers into a chain.

    This will create one model per output. The prediction of the first output will be used as a feature in the second model. The prediction for the second output will be used as a feature for the third model, etc. This \"chain model\" is therefore capable of capturing dependencies between outputs.
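
    The chaining logic can be sketched as follows (simplified Python; per_label_models is a hypothetical mapping from each label to a fitted classifier, not River's actual internals):

    def chain_predict(per_label_models, order, x):\n    x = dict(x)  # copy, so the caller's features are left untouched\n    y_pred = {}\n    for label in order:\n        y_pred[label] = per_label_models[label].predict_one(x)\n        x[label] = y_pred[label]  # the prediction feeds the next link\n    return y_pred\n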

    "},{"location":"api/multioutput/ClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      A classifier model used for each label.

    • order

      Type \u2192 list | None

      Default \u2192 None

      A list with the targets order in which to construct the chain. If None then the order will be inferred from the order of the keys in the target.

    "},{"location":"api/multioutput/ClassifierChain/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.fetch_openml('yeast', version=4, parser='auto', as_frame=False),\n    shuffle=True,\n    seed=42\n)\n\nmodel = feature_selection.VarianceThreshold(threshold=0.01)\nmodel |= preprocessing.StandardScaler()\nmodel |= multioutput.ClassifierChain(\n    model=linear_model.LogisticRegression(),\n    order=list(range(14))\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n    # Convert y values to booleans\n    y = {i: yi == 'TRUE' for i, yi in y.items()}\n    y_pred = model.predict_one(x)\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 41.81%\n

    "},{"location":"api/multioutput/ClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Multi-Output Chain Models and their Application in Data Streams \u21a9

    "},{"location":"api/multioutput/MonteCarloClassifierChain/","title":"MonteCarloClassifierChain","text":"

    Monte Carlo Sampling Classifier Chains.

    Probabilistic Classifier Chains using Monte Carlo sampling, as described in 1.

    m samples are taken from the posterior distribution. This requires a probabilistic interpretation of the output, which makes this a particular variety of ProbabilisticClassifierChain.

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

    • m

      Type \u2192 int

      Default \u2192 10

      Number of samples to take from the posterior distribution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.MonteCarloClassifierChain(\n    model=linear_model.LogisticRegression(),\n    m=10,\n    seed=42\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 51.79%\n

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Read, J., Martino, L., & Luengo, D. (2014). Efficient monte carlo methods for multi-dimensional learning with classifier chains. Pattern Recognition, 47(3), 1535-1546.\u00a0\u21a9

    "},{"location":"api/multioutput/MultiClassEncoder/","title":"MultiClassEncoder","text":"

    Convert a multi-label task into multiclass.

    Assigns a class to each unique combination of labels, and proceeds with training the supplied multi-class classifier.

    The transformation is done by converting the label set, which could be seen as a binary number, into an integer representing a class. At prediction time, the predicted integer is converted back to a binary number which is the predicted label set.
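
    A minimal sketch of this round trip (plain Python; the bit order is illustrative and River's internal encoding may differ):

    labels = ['a', 'b', 'c']\n\ndef encode(y):\n    # Read the label set as a binary number, e.g. {'a': True, 'b': False, 'c': True} -> 0b101\n    return sum(2 ** i for i, label in enumerate(labels) if y[label])\n\ndef decode(code):\n    return {label: bool(code >> i & 1) for i, label in enumerate(labels)}\n\nencode({'a': True, 'b': False, 'c': True})\n
    5\n

    Calling decode(5) recovers the original label set.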

    "},{"location":"api/multioutput/MultiClassEncoder/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier used for learning.

    "},{"location":"api/multioutput/MultiClassEncoder/#examples","title":"Examples","text":"

    from river import forest\nfrom river import metrics\nfrom river import multioutput\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.MultiClassEncoder(\n    model=forest.ARFClassifier(seed=7)\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 95.10%\n

    "},{"location":"api/multioutput/MultiClassEncoder/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, bool]'

    Returns

    MultiLabelClassifier: self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing given a dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, dict[bool, float]]: A dictionary that associates a probability with each label.

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/","title":"ProbabilisticClassifierChain","text":"

    Probabilistic Classifier Chains.

    The Probabilistic Classifier Chains (PCC) 1 is a Bayes-optimal method based on the Classifier Chains (CC).

    Consider the concept of chaining classifiers as searching a path in a binary tree whose leaf nodes are associated with a label \\(y \\in Y\\). While CC searches only a single path in the aforementioned binary tree, PCC looks at each of the \\(2^l\\) paths, where \\(l\\) is the number of labels. This limits the applicability of the method to data sets with a small to moderate number of labels. The authors recommend no more than about 15 labels for real-world applications.
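
    The exhaustive search over the \(2^l\) paths can be sketched like this (plain Python; the joint probabilities are toy numbers standing in for the chained classifier outputs). With \(l = 15\) this already means scoring 32768 paths, hence the recommendation above.

    import itertools\n\n# Toy joint distribution over l=2 labels (illustrative numbers only)\njoint = {\n    (False, False): 0.1, (False, True): 0.2,\n    (True, False): 0.3, (True, True): 0.4,\n}\nmax(itertools.product([False, True], repeat=2), key=joint.get)\n
    (True, True)\n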

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#examples","title":"Examples","text":"

    from river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.ProbabilisticClassifierChain(\n    model=linear_model.LogisticRegression()\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 51.84%\n

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Cheng, W., H\u00fcllermeier, E., & Dembczynski, K. J. (2010). Bayes optimal multilabel classification via probabilistic classifier chains. In Proceedings of the 27th international conference on machine learning (ICML-10) (pp. 279-286).\u00a0\u21a9

    "},{"location":"api/multioutput/RegressorChain/","title":"RegressorChain","text":"

    A multi-output model that arranges regressors into a chain.

    This will create one model per output. The prediction of the first output will be used as a feature in the second output. The prediction for the second output will be used as a feature for the third, etc. This \"chain model\" is therefore capable of capturing dependencies between outputs.

    "},{"location":"api/multioutput/RegressorChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor

      The regression model used to make predictions for each target.

    • order

      Type \u2192 list | None

      Default \u2192 None

      A list with the targets order in which to construct the chain. If None then the order will be inferred from the order of the keys in the target.

    "},{"location":"api/multioutput/RegressorChain/#examples","title":"Examples","text":"

    from river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river import stream\n\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_linnerud(),\n    shuffle=True,\n    seed=42\n)\n\nmodel = multioutput.RegressorChain(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(intercept_lr=0.3)\n    ),\n    order=[0, 1, 2]\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.MAE())\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MicroAverage(MAE): 12.733525\n

    "},{"location":"api/multioutput/RegressorChain/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the outputs of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predictions.

    "},{"location":"api/naive-bayes/BernoulliNB/","title":"BernoulliNB","text":"

    Bernoulli Naive Bayes.

    The Bernoulli Naive Bayes model learns from occurrences between features (such as word counts) and discrete classes. The input vector must contain positive values, such as counts or TF-IDF values.

    "},{"location":"api/naive-bayes/BernoulliNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing); a short sketch of how it enters the estimates follows this parameter list.

    • true_threshold

      Default \u2192 0.0

      Threshold for binarizing (mapping to booleans) features.
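
    As a sketch of where alpha enters (the textbook Laplace-smoothed estimate of a Bernoulli feature probability; River's implementation details may differ):

    def p_feature_given_class(n_fc, n_c, alpha=1.0):\n    # n_fc: documents of class c containing the feature; n_c: documents of class c\n    return (n_fc + alpha) / (n_c + 2 * alpha)\n\n# 'Chinese' appears in all 3 'yes' documents of the example below\np_feature_given_class(n_fc=3, n_c=3)\n
    0.8\n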

    "},{"location":"api/naive-bayes/BernoulliNB/#attributes","title":"Attributes","text":"
    • class_counts (collections.Counter)

      Number of times each class has been seen.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    "},{"location":"api/naive-bayes/BernoulliNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"yes\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.BernoulliNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.75\n
    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.8831539823829913, 'no': 0.11684601761700895}\n

    model.predict_one(\"test\")\n
    'yes'\n

    You can train the model and make predictions in mini-batch mode using the learn_many and predict_many methods.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"yes\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.BernoulliNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
             no       yes\n0  0.116846  0.883154\n1  0.047269  0.952731\n

    model.predict_many(unseen)\n
    0    yes\n1    yes\ndtype: object\n

    "},{"location":"api/naive-bayes/BernoulliNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many p_feature_given_class predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. The Bernoulli model \u21a9

    "},{"location":"api/naive-bayes/ComplementNB/","title":"ComplementNB","text":"

    Naive Bayes classifier for multinomial models.

    The Complement Naive Bayes model learns from occurrences between features (such as word counts) and discrete classes. ComplementNB is suitable for imbalanced datasets. The input vector must contain positive values, such as counts or TF-IDF values.

    "},{"location":"api/naive-bayes/ComplementNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).

    "},{"location":"api/naive-bayes/ComplementNB/#attributes","title":"Attributes","text":"
    • class_dist (proba.Multinomial)

      Class prior probability distribution.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    • class_totals (collections.Counter)

      Total frequencies per class.

    "},{"location":"api/naive-bayes/ComplementNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"maybe\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.ComplementNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.5\n

    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model[\"nb\"].p_class(\"maybe\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.275, 'maybe': 0.375, 'no': 0.35}\n

    model.predict_one(\"test\")\n
    'maybe'\n

    You can train the model and make predictions in mini-batch mode using the learn_many and predict_many methods.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"maybe\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.ComplementNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
          maybe        no       yes\n0  0.415129  0.361624  0.223247\n1  0.248619  0.216575  0.534807\n

    model.predict_many(unseen)\n
    0    maybe\n1      yes\ndtype: object\n

    "},{"location":"api/naive-bayes/ComplementNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. Rennie, J.D., Shih, L., Teevan, J. and Karger, D.R., 2003. Tackling the poor assumptions of naive bayes text classifiers. In Proceedings of the 20th international conference on machine learning (ICML-03) (pp. 616-623) \u21a9

    2. StackExchange discussion \u21a9

    "},{"location":"api/naive-bayes/GaussianNB/","title":"GaussianNB","text":"

    Gaussian Naive Bayes.

    A Gaussian distribution \(G_{cf}\) is maintained for each class \(c\) and each feature \(f\). Each Gaussian is updated using the amount associated with each feature; the details can be found in proba.Gaussian. The joint log-likelihood is then obtained by summing the log probabilities of each feature associated with each class.
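
    A minimal sketch of that sum (plain Python; the per-feature means and standard deviations are illustrative stand-ins for the fitted Gaussians):

    import math\n\ndef gaussian_logpdf(x, mu, sigma):\n    return -0.5 * math.log(2 * math.pi * sigma ** 2) - (x - mu) ** 2 / (2 * sigma ** 2)\n\ndef joint_log_likelihood(x, class_prior, params):\n    # params maps each feature to the (mu, sigma) of its class-conditional Gaussian\n    return math.log(class_prior) + sum(\n        gaussian_logpdf(x[f], mu, sigma) for f, (mu, sigma) in params.items()\n    )\n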

    "},{"location":"api/naive-bayes/GaussianNB/#examples","title":"Examples","text":"

    from river import naive_bayes\nfrom river import stream\nimport numpy as np\n\nX = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\nY = np.array([1, 1, 1, 2, 2, 2])\n\nmodel = naive_bayes.GaussianNB()\n\nfor x, y in stream.iter_array(X, Y):\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({0: -0.8, 1: -1})\n
    1\n

    "},{"location":"api/naive-bayes/GaussianNB/#methods","title":"Methods","text":"joint_log_likelihood joint_log_likelihood_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/naive-bayes/MultinomialNB/","title":"MultinomialNB","text":"

    Naive Bayes classifier for multinomial models.

    The Multinomial Naive Bayes model learns from occurrences between features (such as word counts) and discrete classes. The input vector must contain positive values, such as counts or TF-IDF values.

    "},{"location":"api/naive-bayes/MultinomialNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).

    "},{"location":"api/naive-bayes/MultinomialNB/#attributes","title":"Attributes","text":"
    • class_dist (proba.Multinomial)

      Class prior probability distribution.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    • class_totals (collections.Counter)

      Total frequencies per class.

    "},{"location":"api/naive-bayes/MultinomialNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"maybe\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.MultinomialNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.5\n

    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model[\"nb\"].p_class(\"maybe\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.413, 'maybe': 0.310, 'no': 0.275}\n

    model.predict_one(\"test\")\n
    'yes'\n

    You can train the model and make predictions in mini-batch mode using the learn_many and predict_many methods.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"maybe\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.MultinomialNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
          maybe        no       yes\n0  0.373272  0.294931  0.331797\n1  0.160396  0.126733  0.712871\n

    model.predict_many(unseen)\n
    0    maybe\n1      yes\ndtype: object\n

    "},{"location":"api/naive-bayes/MultinomialNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many p_feature_given_class predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. Naive Bayes text classification \u21a9

    "},{"location":"api/neighbors/KNNClassifier/","title":"KNNClassifier","text":"

    K-Nearest Neighbors (KNN) for classification.

    Samples are stored using a first-in, first-out strategy. The strategy to perform search queries in the data buffer is defined by the engine parameter.

    "},{"location":"api/neighbors/KNNClassifier/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type \u2192 int

      Default \u2192 5

      The number of nearest neighbors to search for.

    • engine

      Type \u2192 BaseNN | None

      Default \u2192 None

      The search engine used to store the instances and perform search queries. Depending on the chosen engine, the search will be exact or approximate. Please consult the documentation of each available search engine for more details on its usage. By default, the SWINN search engine is used for approximate search queries.

    • weighted

      Type \u2192 bool

      Default \u2192 True

      Weight the contribution of each neighbor by its inverse distance (see the sketch after this parameter list).

    • cleanup_every

      Type \u2192 int

      Default \u2192 0

      This determines the rate at which old classes are cleaned up. Classes that have been seen in the past but that are not present in the current window are dropped. Classes are never dropped when this is set to 0.

    • softmax

      Type \u2192 bool

      Default \u2192 False

      Whether or not to use softmax normalization to normalize the neighbors contributions. Votes are divided by the total number of votes if this is False.
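
    The voting options can be sketched as follows (plain Python over a toy search result; not River's internals):

    import collections\n\nneighbors = [('spam', 0.5), ('ham', 1.0), ('spam', 2.0)]  # (label, distance) pairs\n\nvotes = collections.Counter()\nfor label, dist in neighbors:\n    votes[label] += 1 / dist  # weighted=True: inverse-distance votes\n\ntotal = sum(votes.values())  # softmax=False: divide by the total vote mass\n{label: vote / total for label, vote in votes.items()}\n
    {'spam': 0.7142857142857143, 'ham': 0.2857142857142857}\n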

    "},{"location":"api/neighbors/KNNClassifier/#examples","title":"Examples","text":"
    import functools\nfrom river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import neighbors\nfrom river import preprocessing\nfrom river import utils\n\ndataset = datasets.Phishing()\n

    To select a custom distance metric which takes one or several parameter, you can wrap your chosen distance using functools.partial:

    l1_dist = functools.partial(utils.math.minkowski_distance, p=1)\n\nmodel = (\n    preprocessing.StandardScaler() |\n    neighbors.KNNClassifier(\n        engine=neighbors.SWINN(\n            dist_func=l1_dist,\n            seed=42\n        )\n    )\n)\n\nevaluate.progressive_val_score(dataset, model, metrics.Accuracy())\n
    Accuracy: 89.67%\n

    "},{"location":"api/neighbors/KNNClassifier/#methods","title":"Methods","text":"clean_up_classes

    Clean up classes added to the window.

    Classes that are added (and removed) from the window may no longer be valid. This method cleans up the window and ensures that only known classes are added, and that we do not consider \"None\" a class. It is called every cleanup_every step, or can be called manually.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/neighbors/KNNClassifier/#notes","title":"Notes","text":"

    Note that since the window is moving and we keep track of all classes that are added at some point, a class might be returned in a result (with a value of 0) if it is no longer in the window. You can call model.clean_up_classes(), or set cleanup_every to a non-zero value.

    "},{"location":"api/neighbors/KNNRegressor/","title":"KNNRegressor","text":"

    K-Nearest Neighbors regressor.

    Samples are stored using a first-in, first-out strategy. The strategy to perform search queries in the data buffer is defined by the engine parameter. Predictions are obtained by aggregating the values of the closest n_neighbors stored samples with respect to a query sample.

    "},{"location":"api/neighbors/KNNRegressor/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type \u2192 int

      Default \u2192 5

      The number of nearest neighbors to search for.

    • engine

      Type \u2192 BaseNN | None

      Default \u2192 None

      The search engine used to store the instances and perform search queries. Depending on the chosen engine, the search will be exact or approximate. Please consult the documentation of each available search engine for more details on its usage. By default, the SWINN search engine is used for approximate search queries.

    • aggregation_method

      Type \u2192 str

      Default \u2192 mean

      The method used to aggregate the target values of the neighbors. Can be either 'mean', 'median', or 'weighted_mean'; a short sketch follows this parameter list.
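
    A sketch of the three aggregation options over a toy search result (plain Python; not River's internals):

    import statistics\n\nneighbors = [(10.0, 0.5), (12.0, 1.0), (20.0, 2.0)]  # (target, distance) pairs\nys = [y for y, _ in neighbors]\nws = [1 / d for _, d in neighbors]\n\n{\n    'mean': statistics.mean(ys),\n    'median': statistics.median(ys),\n    'weighted_mean': sum(w * y for w, y in zip(ws, ys)) / sum(ws),\n}\n
    {'mean': 14.0, 'median': 12.0, 'weighted_mean': 12.0}\n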

    "},{"location":"api/neighbors/KNNRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import neighbors\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = neighbors.KNNRegressor()\nevaluate.progressive_val_score(dataset, model, metrics.RMSE())\n
    RMSE: 1.427743\n

    "},{"location":"api/neighbors/KNNRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/neighbors/LazySearch/","title":"LazySearch","text":"

    Exact nearest neighbors using a lazy search strategy.

    "},{"location":"api/neighbors/LazySearch/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Default \u2192 50

      Size of the sliding window used to search for neighbors.

    • min_distance_keep

      Type \u2192 float

      Default \u2192 0.0

      The minimum distance (similarity) to consider adding a point to the window. E.g., a value of 0.0 will add even exact duplicates.

    • dist_func

      Type \u2192 DistanceFunc | FunctionWrapper | None

      Default \u2192 None

      A distance function which accepts two input items to compare. If not set, the Minkowski distance with p=2 is used.

    "},{"location":"api/neighbors/LazySearch/#methods","title":"Methods","text":"append

    Add a point to the window, optionally with extra metadata.

    Parameters

    • item \u2014 'typing.Any'
    • extra \u2014 'typing.Any | None' \u2014 defaults to None
    • kwargs

    search

    Find the n_neighbors closest points to item, along with their distances.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int'
    • kwargs

    update

    Update the window with a new point, only added if > min distance.

    If min distance is 0, we do not need to do the calculation. The item (and extra metadata) will not be added to the window if it is too close to an existing point.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int' \u2014 defaults to 1
    • extra \u2014 'typing.Any | None' \u2014 defaults to None

    Returns

    A boolean (true/false) to indicate if the point was added.

    "},{"location":"api/neighbors/LazySearch/#notes","title":"Notes","text":"

    Updates are by default stored by the FIFO (first in first out) method, which means that when the size limit is reached, old samples are dumped to give room for new samples. This is circular, meaning that older points are dumped first. This also gives the implementation a temporal aspect, because older samples are replaced with newer ones.

    The parameter min_distance_keep controls the addition of new items to the window: items that are far enough away (> min_distance_keep) are added to the window. Thus, a value of 0 indicates that we add all points, and increasing it from 0 makes it less likely that a new item will be kept.
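
    A minimal sketch of that gatekeeping (plain Python; window and dist_func are hypothetical stand-ins for the internal buffer and distance function):

    def maybe_add(window, item, dist_func, min_distance_keep=0.0):\n    # With min_distance_keep == 0 there is nothing to check: every point is kept\n    if min_distance_keep > 0 and window:\n        nearest = min(dist_func(item, other) for other in window)\n        if nearest <= min_distance_keep:\n            return False  # too close to an existing point, so it is dropped\n    window.append(item)\n    return True\n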

    "},{"location":"api/neighbors/SWINN/","title":"SWINN","text":"

    Sliding WIndow-based Nearest Neighbor (SWINN) search using Graphs.

    Extends the NNDescent algorithm 1 to handle vertex addition and removal in a FIFO data ingestion policy. SWINN builds and keeps a directed graph where edges connect the nearest neighbors. Any distance metric can be used to build the graph. Since the graph is directed, the user must set the desired number of neighbors. More neighbors imply more accurate search queries at the cost of increased running time and memory usage. Note that although the number of directed neighbors is limited by the user, there is no direct control over the number of reverse neighbors, i.e., the number of vertices that have an edge to a given vertex.

    The basic idea of SWINN and NNDescent is that \"the neighbor of my neighbors might as well be my neighbor\". Hence, the connections are constantly revisited to improve the graph structure. The algorithm for creating and maintaining the search graph can be described in general lines as follows:

    • Start with a random neighborhood graph;

    • For each node in the search graph: refine the current neighborhood by checking if there are better neighborhood options among the neighbors of the current neighbors;

    • If the total number of neighborhood changes is smaller than a given stopping criterion, then stop.

    SWINN adds strategies to remove vertices from the search graph and to prune redundant edges. SWINN is more efficient when the selected maxlen is greater than 500. For small data windows, the lazy/exhaustive search, i.e., neighbors.LazySearch, might be a better idea.
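
    In rough strokes, the refinement loop looks like this (a simplified toy rendition of NNDescent in plain Python; items and dist are hypothetical, and River's implementation is considerably more elaborate):

    import random\n\ndef nn_descent(items, dist, k, n_iters=10, delta=1e-4, seed=42):\n    rng = random.Random(seed)\n    n = len(items)\n    # Start with a random neighborhood graph\n    graph = {i: rng.sample([j for j in range(n) if j != i], k) for i in range(n)}\n    for _ in range(n_iters):\n        changes = 0\n        for i in range(n):\n            # Candidates: current neighbors plus the neighbors of those neighbors\n            candidates = set(graph[i]) | {c for j in graph[i] for c in graph[j] if c != i}\n            best = sorted(candidates, key=lambda j: dist(items[i], items[j]))[:k]\n            changes += len(set(best) - set(graph[i]))\n            graph[i] = best\n        # Early stop once the number of edge changes is small enough\n        if changes <= delta * k * n:\n            break\n    return graph\n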

    "},{"location":"api/neighbors/SWINN/#parameters","title":"Parameters","text":"
    • graph_k

      Type \u2192 int

      Default \u2192 20

      The maximum number of direct nearest neighbors each node has.

    • dist_func

      Type \u2192 DistanceFunc | FunctionWrapper | None

      Default \u2192 None

      The distance function used to compare two items. If not set, the Minkowski distance with p=2 is used.

    • maxlen

      Type \u2192 int

      Default \u2192 1000

      The maximum size of the data buffer.

    • warm_up

      Type \u2192 int

      Default \u2192 500

      How many data instances to observe before starting the search graph.

    • max_candidates

      Type \u2192 int

      Default \u2192 None

      The maximum number of vertices to consider when performing local neighborhood joins. If not set, SWINN will use min(50, max(50, self.graph_k)).

    • delta

      Type \u2192 float

      Default \u2192 0.0001

      Early stop parameter for the neighborhood refinement procedure. NNDescent will stop running if the maximum number of iterations is reached or the number of edge changes after an iteration is smaller than or equal to delta * graph_k * n_nodes. In the last expression, n_nodes refers to the number of graph nodes involved in the (local) neighborhood refinement.

    • prune_prob

      Type \u2192 float

      Default \u2192 0.0

      The probability of removing redundant edges. Must be between 0 and 1. If set to zero, no edge will be pruned. When set to one, every potentially redundant edge will be dropped.

    • n_iters

      Type \u2192 int

      Default \u2192 10

      The maximum number of NNDescent iterations to perform to refine the search index.

    • seed

      Type \u2192 int

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/neighbors/SWINN/#methods","title":"Methods","text":"append

    Add a new item to the search index.

    Data is stored using the FIFO strategy. Both the data buffer and the search graph are updated. The addition of a new item will trigger the removal of the oldest item if the maximum size was reached. All edges of the removed node are also dropped, and safety procedures are applied to ensure its neighbors remain accessible. The addition of a new item also triggers local neighborhood refinement procedures, to ensure the search index is effective and the node degree constraints are met.

    Parameters

    • item \u2014 'typing.Any'
    • kwargs

    connectivity

    Get a list with the size of each connected component in the search graph.

    This metric provides an overview of reachability in the search index by using Kruskal's algorithm to build a forest of connected components. We want our search index to have a single connected component, i.e., the case where we get a list containing a single number which is equal to maxlen. If that is not the case, not every node in the search graph can be reached from any given starting point. You may want to try increasing graph_k to improve connectivity. However, keep in mind the following aspects: 1) computing this metric is a costly operation (\\(O(E\\log V)\\)), where \\(E\\) and \\(V\\) are, respectively, the number of edges and vertices in the search graph; 2) often, connectivity comes at the price of increased computational costs. Tweaking the sample_rate might help in such situations. The best possible scenario is to decrease the value of graph_k while keeping a single connected component.

    Returns

    list[int]: A list of the number of elements in each connected component of the graph.

    search

    Search the underlying nearest neighbor graph given a query item.

    If not enough samples have been observed yet, i.e., the number of stored samples is smaller than warm_up, the search falls back to a brute-force strategy.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int'
    • epsilon \u2014 'float' \u2014 defaults to 0.1
    • kwargs

    Returns

    tuple[list, list]: neighbors, dists
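
    As a rough usage sketch (this example is not from the original docs), the following shows how append, search, and connectivity fit together; it assumes items are plain feature dicts, which the default Minkowski distance can compare:

    from river import neighbors\nimport random\n\nrng = random.Random(42)\n\nswinn = neighbors.SWINN(graph_k=20, maxlen=1000, warm_up=500, seed=42)\n\n# index 600 random 3-dimensional points; the graph is used once warm_up is exceeded\nfor _ in range(600):\n    swinn.append({i: rng.random() for i in range(3)})\n\n# retrieve the 5 approximate nearest neighbors of a query point\npoints, dists = swinn.search({i: 0.5 for i in range(3)}, n_neighbors=5)\n\n# ideally a single connected component containing every stored item\nswinn.connectivity()\n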

    "},{"location":"api/neighbors/SWINN/#notes","title":"Notes","text":"

    There is an accuracy/speed trade-off between graph_k and sample_rate. To ensure a single connected component, and thus an effective search index, one can increase graph_k. The connectivity method is a helper to determine whether the search index has a single connected component. However, search accuracy might come at the cost of increased memory usage and slower processing. To alleviate that, one can decrease the sample_rate to avoid exploring all the undirected edges of a node during search queries and local graph refinements. Moreover, the edge pruning procedures also help decrease the computational costs. Note that anything that limits the number of explored neighbors or prunes edges might have a negative impact on search accuracy.

    1. Dong, W., Moses, C., & Li, K. (2011, March). Efficient k-nearest neighbor graph construction for generic similarity measures. In Proceedings of the 20th international conference on World wide web (pp. 577-586).\u00a0\u21a9

    "},{"location":"api/neural-net/MLPRegressor/","title":"MLPRegressor","text":"

    Multi-layer Perceptron for regression.

    This model is still a work in progress. Here are some features that still need implementing:

    • learn_one and predict_one just cast the input dict to a single-row dataframe and then call learn_many and predict_many respectively. This is very inefficient.

    • Not all of the optimizers in the optim module can be used, as they are not all vectorised.

    • Emerging and disappearing features are not supported. Each instance/batch has to have the same features.

    • The gradients haven't been numerically checked.

    "},{"location":"api/neural-net/MLPRegressor/#parameters","title":"Parameters","text":"
    • hidden_dims

      The dimensions of the hidden layers. For example, specifying (10, 20) means that there are two hidden layers with 10 and 20 neurons, respectively. Note that the number of layers the network contains is equal to the number of hidden layers plus two (to account for the input and output layers).

    • activations

      The activation functions to use at each layer, including the input and output layers. Therefore, you need to specify three activations if you specify one hidden layer.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      Loss function. Defaults to optim.losses.Squared.

    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      Optimizer. Defaults to optim.SGD with the learning rate set to 0.01.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/neural-net/MLPRegressor/#attributes","title":"Attributes","text":"
    • n_layers

      Return the number of layers in the network. The number of layers is equal to the number of hidden layers plus 2. The 2 accounts for the input layer and the output layer.

    "},{"location":"api/neural-net/MLPRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import neural_net as nn\nfrom river import optim\nfrom river import preprocessing as pp\nfrom river import metrics\n\nmodel = (\n    pp.StandardScaler() |\n    nn.MLPRegressor(\n        hidden_dims=(5,),\n        activations=(\n            nn.activations.ReLU,\n            nn.activations.ReLU,\n            nn.activations.Identity\n        ),\n        optimizer=optim.SGD(1e-3),\n        seed=42\n    )\n)\n\ndataset = datasets.TrumpApproval()\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.580578\n

    You can also use this to process mini-batches of data.

    import pandas as pd\n\nmodel = (\n    pp.StandardScaler() |\n    nn.MLPRegressor(\n        hidden_dims=(10,),\n        activations=(\n            nn.activations.ReLU,\n            nn.activations.ReLU,\n            nn.activations.ReLU\n        ),\n        optimizer=optim.SGD(1e-4),\n        seed=42\n    )\n)\n\ndataset = datasets.TrumpApproval()\nbatch_size = 32\n\nfor epoch in range(10):\n    for xb in pd.read_csv(dataset.path, chunksize=batch_size):\n        yb = xb.pop('five_thirty_eight')\n        y_pred = model.predict_many(xb)\n        model = model.learn_many(xb, yb)\n\nmodel.predict_many(xb)\n
          five_thirty_eight\n992           39.405231\n993           46.447481\n994           42.121865\n995           40.251148\n996           40.836378\n997           40.893153\n998           40.949927\n999           48.416504\n1000          42.077830\n

    "},{"location":"api/neural-net/MLPRegressor/#methods","title":"Methods","text":"call

    Make predictions.

    Parameters

    • X \u2014 'pd.DataFrame'

    learn_many

    Train the network.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.DataFrame'

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many

    Predict the outcome for each row in a DataFrame of features X.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/neural-net/activations/Identity/","title":"Identity","text":"

    Identity activation function.

    "},{"location":"api/neural-net/activations/Identity/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z

    "},{"location":"api/neural-net/activations/ReLU/","title":"ReLU","text":"

    Rectified Linear Unit (ReLU) activation function.

    "},{"location":"api/neural-net/activations/ReLU/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z

    "},{"location":"api/neural-net/activations/Sigmoid/","title":"Sigmoid","text":"

    Sigmoid activation function.

    "},{"location":"api/neural-net/activations/Sigmoid/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z

    "},{"location":"api/optim/AMSGrad/","title":"AMSGrad","text":"

    AMSGrad optimizer.

    "},{"location":"api/optim/AMSGrad/#parameters","title":"Parameters","text":"
    • lr

      Type \u2192 int | float | optim.base.Scheduler

      Default \u2192 0.1

      The learning rate.

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    • correct_bias

      Default \u2192 True

    "},{"location":"api/optim/AMSGrad/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    • v_hat (collections.defaultdict)

    "},{"location":"api/optim/AMSGrad/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AMSGrad()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.60%\n

    "},{"location":"api/optim/AMSGrad/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Reddi, S.J., Kale, S. and Kumar, S., 2019. On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 \u21a9

    "},{"location":"api/optim/AdaBound/","title":"AdaBound","text":"

    AdaBound optimizer.

    "},{"location":"api/optim/AdaBound/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.001

      The learning rate.

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    • gamma

      Default \u2192 0.001

    • final_lr

      Default \u2192 0.1

    "},{"location":"api/optim/AdaBound/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • s (collections.defaultdict)

    "},{"location":"api/optim/AdaBound/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaBound()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.06%\n

    "},{"location":"api/optim/AdaBound/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Luo, L., Xiong, Y., Liu, Y. and Sun, X., 2019. Adaptive gradient methods with dynamic bound of learning rate. arXiv preprint arXiv:1902.09843 \u21a9

    "},{"location":"api/optim/AdaDelta/","title":"AdaDelta","text":"

    AdaDelta optimizer.

    "},{"location":"api/optim/AdaDelta/#parameters","title":"Parameters","text":"
    • rho

      Default \u2192 0.95

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaDelta/#attributes","title":"Attributes","text":"
    • g2 (collections.defaultdict)

    • s2 (collections.defaultdict)

    "},{"location":"api/optim/AdaDelta/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaDelta()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 80.56%\n

    "},{"location":"api/optim/AdaDelta/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Zeiler, M.D., 2012. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701. \u21a9

    "},{"location":"api/optim/AdaGrad/","title":"AdaGrad","text":"

    AdaGrad optimizer.

    "},{"location":"api/optim/AdaGrad/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaGrad/#attributes","title":"Attributes","text":"
    • g2 (collections.defaultdict)
    "},{"location":"api/optim/AdaGrad/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaGrad()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.01%\n

    "},{"location":"api/optim/AdaGrad/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Duchi, J., Hazan, E. and Singer, Y., 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(Jul), pp.2121-2159. \u21a9

    "},{"location":"api/optim/AdaMax/","title":"AdaMax","text":"

    AdaMax optimizer.

    "},{"location":"api/optim/AdaMax/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaMax/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    "},{"location":"api/optim/AdaMax/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaMax()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.61%\n

    "},{"location":"api/optim/AdaMax/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Kingma, D.P. and Ba, J., 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. \u21a9

    2. Ruder, S., 2016. An overview of gradient descent optimization algorithms. arXiv preprint arXiv:1609.04747. \u21a9

    "},{"location":"api/optim/Adam/","title":"Adam","text":"

    Adam optimizer.

    "},{"location":"api/optim/Adam/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/Adam/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    "},{"location":"api/optim/Adam/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Adam()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.52%\n

    "},{"location":"api/optim/Adam/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Kingma, D.P. and Ba, J., 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. \u21a9

    "},{"location":"api/optim/Averager/","title":"Averager","text":"

    Averaged stochastic gradient descent.

    This is a wrapper that can be applied to any stochastic gradient descent optimiser. Note that this implementation differs from what may be found elsewhere. Essentially, the average of the weights is usually only used at the end of the optimisation, once all the data has been seen. However, in this implementation the optimiser returns the current averaged weights.

    "},{"location":"api/optim/Averager/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer

      An optimizer for which the produced weights will be averaged.

    • start

      Type \u2192 int

      Default \u2192 0

      Indicates the number of iterations to wait before starting the average. Essentially, nothing happens differently before the number of iterations reaches this value.

    "},{"location":"api/optim/Averager/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Averager/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Averager(optim.SGD(0.01), 100)\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.97%\n

    "},{"location":"api/optim/Averager/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Bottou, L., 2010. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010 (pp. 177-186). Physica-Verlag HD. \u21a9

    2. Stochastic Algorithms for One-Pass Learning slides by L\u00e9on Bottou \u21a9

    3. Xu, W., 2011. Towards optimal one pass large scale learning with averaged stochastic gradient descent. arXiv preprint arXiv:1107.2490. \u21a9

    "},{"location":"api/optim/FTRLProximal/","title":"FTRLProximal","text":"

    FTRL-Proximal optimizer.

    "},{"location":"api/optim/FTRLProximal/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 0.05

    • beta

      Default \u2192 1.0

    • l1

      Default \u2192 0.0

    • l2

      Default \u2192 1.0

    "},{"location":"api/optim/FTRLProximal/#attributes","title":"Attributes","text":"
    • z (collections.defaultdict)

    • n (collections.defaultdict)

    "},{"location":"api/optim/FTRLProximal/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.FTRLProximal()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.56%\n

    "},{"location":"api/optim/FTRLProximal/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. McMahan, H.B., Holt, G., Sculley, D., Young, M., Ebner, D., Grady, J., Nie, L., Phillips, T., Davydov, E., Golovin, D. and Chikkerur, S., 2013, August. Ad click prediction: a view from the trenches. In Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 1222-1230) \u21a9

    2. Tensorflow's FtrlOptimizer \u21a9

    "},{"location":"api/optim/Momentum/","title":"Momentum","text":"

    Momentum optimizer.

    "},{"location":"api/optim/Momentum/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    "},{"location":"api/optim/Momentum/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Momentum/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Momentum()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 84.09%\n

    "},{"location":"api/optim/Momentum/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    "},{"location":"api/optim/Nadam/","title":"Nadam","text":"

    Nadam optimizer.

    "},{"location":"api/optim/Nadam/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/Nadam/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Nadam/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Nadam()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.60%\n

    "},{"location":"api/optim/Nadam/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Nadam: A combination of adam and nesterov \u21a9

    "},{"location":"api/optim/NesterovMomentum/","title":"NesterovMomentum","text":"

    Nesterov Momentum optimizer.

    "},{"location":"api/optim/NesterovMomentum/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    "},{"location":"api/optim/NesterovMomentum/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/NesterovMomentum/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.NesterovMomentum()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 84.22%\n

    "},{"location":"api/optim/NesterovMomentum/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    "},{"location":"api/optim/RMSProp/","title":"RMSProp","text":"

    RMSProp optimizer.

    "},{"location":"api/optim/RMSProp/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/RMSProp/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/RMSProp/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.RMSProp()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.24%\n

    "},{"location":"api/optim/RMSProp/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Divide the gradient by a running average of its recent magnitude \u21a9

    "},{"location":"api/optim/SGD/","title":"SGD","text":"

    Plain stochastic gradient descent.

    "},{"location":"api/optim/SGD/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.01

    "},{"location":"api/optim/SGD/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/SGD/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.SGD(0.1)\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.85%\n

    "},{"location":"api/optim/SGD/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Robbins, H. and Monro, S., 1951. A stochastic approximation method. The annals of mathematical statistics, pp.400-407 \u21a9

    "},{"location":"api/optim/base/Initializer/","title":"Initializer","text":"

    An initializer is used to set initial weights in a model.

    "},{"location":"api/optim/base/Initializer/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/base/Loss/","title":"Loss","text":"

    Base class for all loss functions.

    "},{"location":"api/optim/base/Loss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/base/Optimizer/","title":"Optimizer","text":"

    Optimizer interface.

    Every optimizer inherits from this base interface.

    "},{"location":"api/optim/base/Optimizer/#parameters","title":"Parameters","text":"
    • lr

      Type \u2192 int | float | Scheduler

    "},{"location":"api/optim/base/Optimizer/#attributes","title":"Attributes","text":"
    • learning_rate (float)

      Returns the current learning rate value.

    "},{"location":"api/optim/base/Optimizer/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    Parameters: w (dict): A dictionary of weight parameters. The weights are modified in-place. Returns: The updated weights.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.
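
    To make the step contract concrete, here is a minimal sketch using plain optim.SGD, which subtracts lr times the gradient from each weight; the weight names used here are illustrative:

    from river import optim\n\noptimizer = optim.SGD(lr=0.1)\nw = {'feature': 1.0}\ng = {'feature': 0.5}\n\n# the weights are updated in-place and also returned: 1.0 - 0.1 * 0.5 = 0.95\noptimizer.step(w, g)\n
    {'feature': 0.95}\n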

    "},{"location":"api/optim/base/Scheduler/","title":"Scheduler","text":"

    Can be used to program the learning rate schedule of an optim.base.Optimizer.

    "},{"location":"api/optim/base/Scheduler/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'
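
    For instance, a scheduler can be passed as the lr of any optimizer; a minimal sketch using optim.schedulers.Constant:

    from river import optim\n\noptimizer = optim.SGD(lr=optim.schedulers.Constant(0.01))\n\n# the learning_rate property reflects the value produced by the scheduler\noptimizer.learning_rate\n
    0.01\n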

    "},{"location":"api/optim/initializers/Constant/","title":"Constant","text":"

    Constant initializer which always returns the same value.

    "},{"location":"api/optim/initializers/Constant/#parameters","title":"Parameters","text":"
    • value

      Type \u2192 float

    "},{"location":"api/optim/initializers/Constant/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Constant(value=3.14)\n\ninit(shape=1)\n
    3.14\n

    init(shape=2)\n
    array([3.14, 3.14])\n

    "},{"location":"api/optim/initializers/Constant/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/initializers/Normal/","title":"Normal","text":"

    Random normal initializer which simulates a normal distribution with the specified parameters.

    "},{"location":"api/optim/initializers/Normal/#parameters","title":"Parameters","text":"
    • mu

      Default \u2192 0.0

      The mean of the normal distribution.

    • sigma

      Default \u2192 1.0

      The standard deviation of the normal distribution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed that can be set for reproducibility.

    "},{"location":"api/optim/initializers/Normal/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Normal(mu=0, sigma=1, seed=42)\n\ninit(shape=1)\n
    0.496714\n

    init(shape=2)\n
    array([-0.1382643 ,  0.64768854])\n

    "},{"location":"api/optim/initializers/Normal/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/initializers/Zeros/","title":"Zeros","text":"

    Constant initializer which always returns zeros.

    "},{"location":"api/optim/initializers/Zeros/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Zeros()\n\ninit(shape=1)\n
    0.0\n

    init(shape=2)\n
    array([0., 0.])\n

    "},{"location":"api/optim/initializers/Zeros/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/losses/Absolute/","title":"Absolute","text":"

    Absolute loss, also known as the mean absolute error or L1 loss.

    Mathematically, it is defined as

    \\[L = |p_i - y_i|\\]

    Its gradient with respect to \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = sgn(p_i - y_i)\\]"},{"location":"api/optim/losses/Absolute/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Absolute()\nloss(-42, 42)\n
    84\n
    loss.gradient(1, 2)\n
    1\n
    loss.gradient(2, 1)\n
    -1\n

    "},{"location":"api/optim/losses/Absolute/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/BinaryFocalLoss/","title":"BinaryFocalLoss","text":"

    Binary focal loss.

    This implements the \"star\" algorithm from the appendix of the focal loss paper.

    "},{"location":"api/optim/losses/BinaryFocalLoss/#parameters","title":"Parameters","text":"
    • gamma

      Default \u2192 2

    • beta

      Default \u2192 1
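
    As a small illustrative sketch (not from the original docs), the focal term down-weights easy examples, so a confident mistake should cost far more than an equally confident correct prediction; y_pred is expected to be a logit:

    from river import optim\n\nloss = optim.losses.BinaryFocalLoss(gamma=2)\n\n# a confident mistake is penalised much more heavily\nloss(True, -3.0) > loss(True, 3.0)\n
    True\n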

    "},{"location":"api/optim/losses/BinaryFocalLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Lin, T.Y., Goyal, P., Girshick, R., He, K. and Doll\u00e1r, P., 2017. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision (pp. 2980-2988)

    "},{"location":"api/optim/losses/BinaryLoss/","title":"BinaryLoss","text":"

    A loss appropriate for binary classification tasks.

    "},{"location":"api/optim/losses/BinaryLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Cauchy/","title":"Cauchy","text":"

    Cauchy loss function.

    "},{"location":"api/optim/losses/Cauchy/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 80
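
    As an illustrative sketch, assuming the usual Cauchy loss definition, the loss grows only logarithmically with the residual, so extreme errors are damped rather than amplified:

    from river import optim\n\nloss = optim.losses.Cauchy(C=80)\n\n# the gradient of an extreme residual remains bounded, unlike with the squared loss\nloss.gradient(1000, 0)\n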

    "},{"location":"api/optim/losses/Cauchy/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. \"Effect of MAE\" Kaggle discussion \u21a9

    2. Paris Madness Kaggle kernel \u21a9

    "},{"location":"api/optim/losses/CrossEntropy/","title":"CrossEntropy","text":"

    Cross entropy loss.

    This is a generalization of logistic loss to multiple classes.

    "},{"location":"api/optim/losses/CrossEntropy/#parameters","title":"Parameters","text":"
    • class_weight

      Type \u2192 dict[base.typing.ClfTarget, float] | None

      Default \u2192 None

      A dictionary that indicates what weight to associate with each class.

    "},{"location":"api/optim/losses/CrossEntropy/#examples","title":"Examples","text":"

    from river import optim\n\ny_true = [0, 1, 2, 2]\ny_pred = [\n    {0: 0.29450637, 1: 0.34216758, 2: 0.36332605},\n    {0: 0.21290077, 1: 0.32728332, 2: 0.45981591},\n    {0: 0.42860913, 1: 0.33380113, 2: 0.23758974},\n    {0: 0.44941979, 1: 0.32962558, 2: 0.22095463}\n]\n\nloss = optim.losses.CrossEntropy()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(loss(yt, yp))\n
    1.222454\n1.116929\n1.437209\n1.509797\n

    for yt, yp in zip(y_true, y_pred):\n    print(loss.gradient(yt, yp))\n
    {0: -0.70549363, 1: 0.34216758, 2: 0.36332605}\n{0: 0.21290077, 1: -0.67271668, 2: 0.45981591}\n{0: 0.42860913, 1: 0.33380113, 2: -0.76241026}\n{0: 0.44941979, 1: 0.32962558, 2: -0.77904537}\n

    "},{"location":"api/optim/losses/CrossEntropy/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. What is Softmax regression and how is it related to Logistic regression? \u21a9

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/","title":"EpsilonInsensitiveHinge","text":"

    Epsilon-insensitive hinge loss.

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/#parameters","title":"Parameters","text":"
    • eps

      Default \u2192 0.1
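
    A small sketch, assuming the standard epsilon-insensitive definition \(max(0, |p_i - y_i| - eps)\): residuals smaller than eps incur no loss at all.

    from river import optim\n\nloss = optim.losses.EpsilonInsensitiveHinge(eps=0.1)\n\n# the residual 0.05 lies inside the eps tube, so the loss vanishes\nloss(5.0, 5.05)\n
    0.0\n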

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Hinge/","title":"Hinge","text":"

    Computes the hinge loss.

    Mathematically, it is defined as

    \\[L = max(0, 1 - p_i * y_i)\\]

    Its gradient with respect to \(p_i\) is

    \[\frac{\partial L}{\partial p_i} = \begin{cases} 0 & p_i y_i \geqslant 1 \\ -y_i & p_i y_i < 1 \end{cases}\]"},{"location":"api/optim/losses/Hinge/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 1.0

      Margin threshold. 1 yields the loss used in SVMs, whilst 0 is equivalent to the loss used in the Perceptron algorithm.

    "},{"location":"api/optim/losses/Hinge/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Hinge(threshold=1)\nloss(1, .2)\n
    0.8\n

    loss.gradient(1, .2)\n
    -1\n

    "},{"location":"api/optim/losses/Hinge/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Huber/","title":"Huber","text":"

    Huber loss.

    Variant of the squared loss that is robust to outliers.

    "},{"location":"api/optim/losses/Huber/#parameters","title":"Parameters","text":"
    • epsilon

      Default \u2192 0.1
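
    A quick comparison against the squared loss, assuming the usual Huber behavior of bounding the gradient for large residuals:

    from river import optim\n\nhuber = optim.losses.Huber(epsilon=1.0)\nsquared = optim.losses.Squared()\n\n# for a large residual the Huber gradient stays bounded,\n# while the squared loss gradient grows with the error\nabs(huber.gradient(0, 10)) < abs(squared.gradient(0, 10))\n
    True\n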

    "},{"location":"api/optim/losses/Huber/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Huber loss function - Wikipedia

    "},{"location":"api/optim/losses/Log/","title":"Log","text":"

    Logarithmic loss.

    This loss function expects each provided y_pred to be a logit. In other words, it must be the raw output of a linear model or a neural network.

    "},{"location":"api/optim/losses/Log/#parameters","title":"Parameters","text":"
    • weight_pos

      Default \u2192 1.0

    • weight_neg

      Default \u2192 1.0
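
    Since y_pred is expected to be a logit, mean_func maps it back to a probability; assuming, as the description of mean_func suggests, that the inverse link is the logistic sigmoid:

    from river import optim\n\nloss = optim.losses.Log()\n\n# sigmoid(0) = 0.5: a zero logit corresponds to a 50% probability\nloss.mean_func(0.0)\n
    0.5\n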

    "},{"location":"api/optim/losses/Log/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Logit Wikipedia page \u21a9

    "},{"location":"api/optim/losses/MultiClassLoss/","title":"MultiClassLoss","text":"

    A loss appropriate for multi-class classification tasks.

    "},{"location":"api/optim/losses/MultiClassLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Poisson/","title":"Poisson","text":"

    Poisson loss.

    The Poisson loss is usually more suited for regression with count data than the squared loss.

    Mathematically, it is defined as

    \\[L = exp(p_i) - y_i \\times p_i\\]

    Its gradient with respect to \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = exp(p_i) - y_i\\]"},{"location":"api/optim/losses/Poisson/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).
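
    As a quick sanity check of the formulas above: with \(y_i = 2\) and \(p_i = 0\), the loss is \(exp(0) - 2 \times 0 = 1\) and the gradient is \(exp(0) - 2 = -1\).

    from river import optim\n\nloss = optim.losses.Poisson()\n\nloss(2, 0)\n
    1.0\n

    loss.gradient(2, 0)\n
    -1.0\n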

    "},{"location":"api/optim/losses/Quantile/","title":"Quantile","text":"

    Quantile loss.

    "},{"location":"api/optim/losses/Quantile/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 0.5

      Desired quantile to attain.

    "},{"location":"api/optim/losses/Quantile/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Quantile(0.5)\nloss(1, 3)\n
    1.0\n

    loss.gradient(1, 3)\n
    0.5\n

    loss.gradient(3, 1)\n
    -0.5\n

    "},{"location":"api/optim/losses/Quantile/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Wikipedia article on quantile regression \u21a9

    2. Derivative from WolframAlpha \u21a9

    "},{"location":"api/optim/losses/RegressionLoss/","title":"RegressionLoss","text":"

    A loss appropriate for regression tasks.

    "},{"location":"api/optim/losses/RegressionLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Squared/","title":"Squared","text":"

    Squared loss, also known as the L2 loss.

    Mathematically, it is defined as

    \\[L = (p_i - y_i) ^ 2\\]

    Its gradient with respect to \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = 2 (p_i - y_i)\\]

    One thing to note is that this convention is consistent with Vowpal Wabbit and PyTorch, but not with scikit-learn. Indeed, scikit-learn divides the loss by 2, making the 2 disappear in the gradient.

    "},{"location":"api/optim/losses/Squared/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Squared()\nloss(-4, 5)\n
    81\n
    loss.gradient(-4, 5)\n
    18\n
    loss.gradient(5, -4)\n
    -18\n

    "},{"location":"api/optim/losses/Squared/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/schedulers/Constant/","title":"Constant","text":"

    Always uses the same learning rate.

    "},{"location":"api/optim/schedulers/Constant/#parameters","title":"Parameters","text":"
    • learning_rate

      Type \u2192 int | float

    "},{"location":"api/optim/schedulers/Constant/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    "},{"location":"api/optim/schedulers/InverseScaling/","title":"InverseScaling","text":"

    Reduces the learning rate using a power schedule.

    Assuming an initial learning rate \\(\\eta\\), the learning rate at step \\(t\\) is:

    \[\frac{\eta}{(t + 1)^p}\]

    where \\(p\\) is a user-defined parameter.

    "},{"location":"api/optim/schedulers/InverseScaling/#parameters","title":"Parameters","text":"
    • learning_rate

      Type \u2192 float

    • power

      Default \u2192 0.5
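
    Plugging numbers into the formula above: with \(\eta = 0.1\) and \(p = 0.5\), the learning rate at step \(t = 3\) is \(0.1 / (3 + 1)^{0.5} = 0.05\).

    from river import optim\n\nscheduler = optim.schedulers.InverseScaling(learning_rate=0.1, power=0.5)\n\nscheduler.get(3)\n
    0.05\n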

    "},{"location":"api/optim/schedulers/InverseScaling/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    "},{"location":"api/optim/schedulers/Optimal/","title":"Optimal","text":"

    Optimal learning schedule as proposed by L\u00e9on Bottou.

    "},{"location":"api/optim/schedulers/Optimal/#parameters","title":"Parameters","text":"
    • loss

      Type \u2192 optim.losses.Loss

    • alpha

      Default \u2192 0.0001

    "},{"location":"api/optim/schedulers/Optimal/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    1. Bottou, L., 2012. Stochastic gradient descent tricks. In Neural networks: Tricks of the trade (pp. 421-436). Springer, Berlin, Heidelberg. \u21a9

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/","title":"AdaptiveStandardScaler","text":"

    Scales data using exponentially weighted moving average and variance.

    Under the hood, an exponentially weighted running mean and variance are maintained for each feature. This can potentially provide better results for drifting data in comparison to preprocessing.StandardScaler. Indeed, the latter computes a global mean and variance for each feature, whereas this scaler weights data in proportion to their recency.

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.3

      This parameter is passed to stats.EWVar. It is expected to be in [0, 1]. More weight is assigned to recent samples the closer fading_factor is to 1.

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#examples","title":"Examples","text":"

    Consider the following series which contains a positive trend.

    import random\n\nrandom.seed(42)\nX = [\n    {'x': random.uniform(4 + i, 6 + i)}\n    for i in range(8)\n]\nfor x in X:\n    print(x)\n
    {'x': 5.278}\n{'x': 5.050}\n{'x': 6.550}\n{'x': 7.446}\n{'x': 9.472}\n{'x': 10.353}\n{'x': 11.784}\n{'x': 11.173}\n

    This scaler works well with this kind of data because it uses statistics that assign higher weight to more recent data.

    from river import preprocessing\n\nscaler = preprocessing.AdaptiveStandardScaler(fading_factor=.6)\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0}\n{'x': -0.816}\n{'x': 0.812}\n{'x': 0.695}\n{'x': 0.754}\n{'x': 0.598}\n{'x': 0.651}\n{'x': 0.124}\n

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that however do something during the learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/Binarizer/","title":"Binarizer","text":"

    Binarizes the data to 0 or 1 according to a threshold.

    "},{"location":"api/preprocessing/Binarizer/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 0.0

      Values above this are replaced by 1 and the others by 0.

    • dtype

      Default \u2192 <class 'bool'>

      The desired data type to apply.

    "},{"location":"api/preprocessing/Binarizer/#examples","title":"Examples","text":"

    import river\nimport numpy as np\n\nrng = np.random.RandomState(42)\nX = [{'x1': v, 'x2': int(v)} for v in rng.uniform(low=-4, high=4, size=6)]\n\nbinarizer = river.preprocessing.Binarizer()\nfor x in X:\n    print(binarizer.learn_one(x).transform_one(x))\n
    {'x1': False, 'x2': False}\n{'x1': True, 'x2': True}\n{'x1': True, 'x2': True}\n{'x1': True, 'x2': False}\n{'x1': False, 'x2': False}\n{'x1': False, 'x2': False}\n

    "},{"location":"api/preprocessing/Binarizer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that however do something during the learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/FeatureHasher/","title":"FeatureHasher","text":"

    Implements the hashing trick.

    Each pair of (name, value) features is hashed into a random integer. A modulo operator is then used to make sure the hash is in a certain range. We use the Murmurhash implementation from scikit-learn.

    "},{"location":"api/preprocessing/FeatureHasher/#parameters","title":"Parameters","text":"
    • n_features

      Default \u2192 1048576

      The number by which each hash is taken modulo.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Set the seed to produce identical results.

    "},{"location":"api/preprocessing/FeatureHasher/#examples","title":"Examples","text":"

    import river\n\nhasher = river.preprocessing.FeatureHasher(n_features=10, seed=42)\n\nX = [\n    {'dog': 1, 'cat': 2, 'elephant': 4},\n    {'dog': 2, 'run': 5}\n]\nfor x in X:\n    print(hasher.transform_one(x))\n
    Counter({1: 4, 9: 2, 8: 1})\nCounter({4: 5, 8: 2})\n
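
    To make the mechanism concrete, here is a rough pure-Python sketch of the hashing trick. It uses hashlib for determinism instead of scikit-learn's MurmurHash, so the resulting indices differ from the ones above:

    import collections\nimport hashlib\n\ndef hash_trick(x, n_features):\n    counts = collections.Counter()\n    for name, value in x.items():\n        # Hash the feature name deterministically, then reduce it modulo\n        # n_features so the index falls in [0, n_features).\n        i = int(hashlib.md5(name.encode()).hexdigest(), 16) % n_features\n        counts[i] += value\n    return counts\n\nhash_trick({'dog': 1, 'cat': 2, 'elephant': 4}, n_features=10)\n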

    "},{"location":"api/preprocessing/FeatureHasher/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Wikipedia article on feature vectorization using the hashing trick \u21a9

    "},{"location":"api/preprocessing/GaussianRandomProjector/","title":"GaussianRandomProjector","text":"

    Gaussian random projector.

    This transformer reduces the dimensionality of inputs through Gaussian random projection.

    The components of the random projection matrix are drawn from N(0, 1 / n_components).

    "},{"location":"api/preprocessing/GaussianRandomProjector/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

      Number of components to project the data onto.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/preprocessing/GaussianRandomProjector/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = preprocessing.GaussianRandomProjector(\n    n_components=3,\n    seed=42\n)\n\nfor x, y in dataset:\n    x = model.transform_one(x)\n    print(x)\n    break\n
    {0: -61289.37139206629, 1: 141312.51039283074, 2: 279165.99370457436}\n

    model = (\n    preprocessing.GaussianRandomProjector(\n        n_components=5,\n        seed=42\n    ) |\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression()\n)\nevaluate.progressive_val_score(dataset, model, metrics.MAE())\n
    MAE: 0.933502\n
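
    Conceptually, the projection boils down to multiplying the feature vector by a random matrix whose entries are drawn from N(0, 1 / n_components). A minimal numpy sketch of that idea follows; the class itself builds its matrix incrementally as features appear, so this is an illustration rather than its exact implementation:

    import numpy as np\n\nrng = np.random.default_rng(42)\nn_features, n_components = 6, 3\n\n# Entries are drawn from N(0, 1 / n_components).\nP = rng.normal(loc=0, scale=(1 / n_components) ** .5, size=(n_features, n_components))\n\nx = rng.uniform(size=n_features)\nprojected = x @ P  # the lower-dimensional representation\n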

    "},{"location":"api/preprocessing/GaussianRandomProjector/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Gaussian random projection \u21a9

    2. scikit-learn random projections module \u21a9

    "},{"location":"api/preprocessing/LDA/","title":"LDA","text":"

    Online Latent Dirichlet Allocation with Infinite Vocabulary.

    Latent Dirichlet allocation (LDA) is a probabilistic approach for exploring topics in document collections. The key advantage of this variant is that it assumes an infinite vocabulary, meaning that the set of tokens does not have to be known in advance, as opposed to the implementation from sklearn. The results produced by this implementation are identical to those from the original implementation proposed by the method's authors.

    This class takes as input token counts. Therefore, it requires you to tokenize beforehand. You can do so by using a feature_extraction.BagOfWords instance, as shown in the example below.

    "},{"location":"api/preprocessing/LDA/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

      Number of topics of the latent Dirichlet allocation.

    • number_of_documents

      Default \u2192 1000000.0

      Estimated number of documents.

    • alpha_theta

      Default \u2192 0.5

      Hyper-parameter of the Dirichlet distribution of topics.

    • alpha_beta

      Default \u2192 100.0

      Hyper-parameter of the Dirichlet process of distribution over words.

    • tau

      Default \u2192 64.0

      Learning inertia to prevent premature convergence.

    • kappa

      Default \u2192 0.75

      The learning rate kappa controls how quickly new parameter estimates replace the old ones. kappa \u2208 (0.5, 1] is required for convergence.

    • vocab_prune_interval

      Default \u2192 10

      Interval at which to refresh the word-topic distribution.

    • number_of_samples

      Default \u2192 10

      Number of iterations used to compute the document-topic distribution.

    • ranking_smooth_factor

      Default \u2192 1e-12

    • burn_in_sweeps

      Default \u2192 5

      Number of iterations to perform while analyzing a document before updating its topic distribution.

    • maximum_size_vocabulary

      Default \u2192 4000

      Maximum size of the stored vocabulary.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number seed used for reproducibility.

    "},{"location":"api/preprocessing/LDA/#attributes","title":"Attributes","text":"
    • counter (int)

      The current number of observed documents.

    • truncation_size_prime (int)

      Number of distinct words stored in the vocabulary. Updated before processing a document.

    • truncation_size (int)

      Number of distinct words stored in the vocabulary. Updated after processing a document.

    • word_to_index (dict)

      Words as keys and indexes as values.

    • index_to_word (dict)

      Indexes as keys and words as values.

    • nu_1 (dict)

      Weights of the words. Component of the variational inference.

    • nu_2 (dict)

      Weights of the words. Component of the variational inference.

    "},{"location":"api/preprocessing/LDA/#examples","title":"Examples","text":"

    from river import compose\nfrom river import feature_extraction\nfrom river import preprocessing\n\nX = [\n   'weather cold',\n   'weather hot dry',\n   'weather cold rainy',\n   'weather hot',\n   'weather cold humid',\n]\n\nlda = compose.Pipeline(\n    feature_extraction.BagOfWords(),\n    preprocessing.LDA(\n        n_components=2,\n        number_of_documents=60,\n        seed=42\n    )\n)\n\nfor x in X:\n    lda = lda.learn_one(x)\n    topics = lda.transform_one(x)\n    print(topics)\n
    {0: 0.5, 1: 2.5}\n{0: 2.499..., 1: 1.5}\n{0: 0.5, 1: 3.5}\n{0: 0.5, 1: 2.5}\n{0: 1.5, 1: 2.5}\n

    "},{"location":"api/preprocessing/LDA/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    learn_transform_one

    Equivalent to lda.learn_one(x).transform_one(x), but faster.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: Component attributions for the input document.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Zhai, K. and Boyd-Graber, J., 2013, February. Online latent Dirichlet allocation with infinite vocabulary. In International Conference on Machine Learning (pp. 561-569). \u21a9

    2. PyInfVoc on GitHub \u21a9

    "},{"location":"api/preprocessing/MaxAbsScaler/","title":"MaxAbsScaler","text":"

    Scales the data to a [-1, 1] range based on absolute maximum.

    Under the hood a running absolute max is maintained. This scaler is meant for data that is already centered at zero or sparse data. It does not shift/center the data, and thus does not destroy any sparsity.

    "},{"location":"api/preprocessing/MaxAbsScaler/#attributes","title":"Attributes","text":"
    • abs_max (dict)

      Mapping between features and instances of stats.AbsMax.

    "},{"location":"api/preprocessing/MaxAbsScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\nfor x in X:\n    print(x)\n
    {'x': 10.557707}\n{'x': 8.100043}\n{'x': 9.100117}\n{'x': 8.892842}\n{'x': 10.945884}\n

    scaler = preprocessing.MaxAbsScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 1.0}\n{'x': 0.767216}\n{'x': 0.861940}\n{'x': 0.842308}\n{'x': 1.0}\n
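
    The transformation itself is just a division by the running absolute maximum. The following sketch reproduces the outputs above by hand:

    abs_max = 0.0\n\nfor x in X:\n    # Update the running absolute maximum, then divide by it.\n    abs_max = max(abs_max, abs(x['x']))\n    print({'x': x['x'] / abs_max})\n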

    "},{"location":"api/preprocessing/MaxAbsScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/MinMaxScaler/","title":"MinMaxScaler","text":"

    Scales the data to a fixed range from 0 to 1.

    Under the hood a running min and a running peak to peak (max - min) are maintained.

    "},{"location":"api/preprocessing/MinMaxScaler/#attributes","title":"Attributes","text":"
    • min (dict)

      Mapping between features and instances of stats.Min.

    • max (dict)

      Mapping between features and instances of stats.Max.

    "},{"location":"api/preprocessing/MinMaxScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\nfor x in X:\n    print(x)\n
    {'x': 10.557707}\n{'x': 8.100043}\n{'x': 9.100117}\n{'x': 8.892842}\n{'x': 10.945884}\n

    scaler = preprocessing.MinMaxScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0}\n{'x': 0.0}\n{'x': 0.406920}\n{'x': 0.322582}\n{'x': 1.0}\n
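
    The same values can be computed by tracking a running minimum and maximum by hand. This sketch mirrors the (x - min) / (max - min) computation:

    lo, hi = float('inf'), float('-inf')\n\nfor x in X:\n    v = x['x']\n    lo, hi = min(lo, v), max(hi, v)\n    # The peak-to-peak range is max - min; guard against a zero range.\n    ptp = hi - lo\n    print({'x': (v - lo) / ptp if ptp else 0.0})\n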

    "},{"location":"api/preprocessing/MinMaxScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/Normalizer/","title":"Normalizer","text":"

    Scales a set of features so that it has unit norm.

    This is particularly useful when used after a feature_extraction.TFIDF.

    "},{"location":"api/preprocessing/Normalizer/#parameters","title":"Parameters","text":"
    • order

      Default \u2192 2

      Order of the norm (e.g. 2 corresponds to the \\(L^2\\) norm).

    "},{"location":"api/preprocessing/Normalizer/#examples","title":"Examples","text":"

    from river import preprocessing\nfrom river import stream\n\nscaler = preprocessing.Normalizer(order=2)\n\nX = [[4, 1, 2, 2],\n     [1, 3, 9, 3],\n     [5, 7, 5, 1]]\n\nfor x, _ in stream.iter_array(X):\n    print(scaler.transform_one(x))\n
    {0: 0.8, 1: 0.2, 2: 0.4, 3: 0.4}\n{0: 0.1, 1: 0.3, 2: 0.9, 3: 0.3}\n{0: 0.5, 1: 0.7, 2: 0.5, 3: 0.1}\n
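
    Other norms can be chosen via the order parameter. For instance, with order=1 each value is divided by the sum of the absolute values in the row; a quick sketch, with the expected values spelled out in the comment:

    scaler = preprocessing.Normalizer(order=1)\n\nfor x, _ in stream.iter_array(X):\n    # First row: 4 / (4+1+2+2) = 0.444..., 1 / 9 = 0.111..., and so on.\n    print(scaler.transform_one(x))\n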

    "},{"location":"api/preprocessing/Normalizer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/OneHotEncoder/","title":"OneHotEncoder","text":"

    One-hot encoding.

    This transformer will encode every feature it is provided with. If a list or set is provided, this transformer will encode every entry in the list/set. You can apply it to a subset of features by composing it with compose.Select or compose.SelectType.

    "},{"location":"api/preprocessing/OneHotEncoder/#parameters","title":"Parameters","text":"
    • drop_zeros

      Default \u2192 False

      Whether or not 0s should be made explicit.

    • drop_first

      Default \u2192 False

      Whether to get k - 1 dummies out of k categorical levels by removing the first key. This is useful in some statistical models where perfectly collinear features cause problems.

    "},{"location":"api/preprocessing/OneHotEncoder/#examples","title":"Examples","text":"

    Let us first create an example dataset.

    from pprint import pprint\nimport random\nimport string\n\nrandom.seed(42)\nalphabet = list(string.ascii_lowercase)\nX = [\n    {\n        'c1': random.choice(alphabet),\n        'c2': random.choice(alphabet),\n    }\n    for _ in range(4)\n]\npprint(X)\n
    [{'c1': 'u', 'c2': 'd'},\n    {'c1': 'a', 'c2': 'x'},\n    {'c1': 'i', 'c2': 'h'},\n    {'c1': 'h', 'c2': 'e'}]\n

    We can now apply one-hot encoding. All the provided features are one-hot encoded, so there is no need to specify which features to encode.

    from river import preprocessing\n\noh = preprocessing.OneHotEncoder()\nfor x in X[:2]:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c1_u': 0, 'c2_d': 0, 'c2_x': 1}\n

    The drop_zeros parameter can be set to True if you don't want past features to appear in the output with a value of 0. Otherwise, all past features are included.

    oh = preprocessing.OneHotEncoder(drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c2_x': 1}\n{'c1_i': 1, 'c2_h': 1}\n{'c1_h': 1, 'c2_e': 1}\n

    You can keep only k - 1 dummies out of k categorical levels by setting drop_first to True.

    oh = preprocessing.OneHotEncoder(drop_first=True, drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c2_d': 1}\n{'c2_x': 1}\n{'c2_h': 1}\n{'c2_e': 1}\n

    A subset of the features can be one-hot encoded by piping a compose.Select into the OneHotEncoder.

    from river import compose\n\npp = compose.Select('c1') | preprocessing.OneHotEncoder()\n\nfor x in X:\n    pp = pp.learn_one(x)\n    pprint(pp.transform_one(x))\n
    {'c1_u': 1}\n{'c1_a': 1, 'c1_u': 0}\n{'c1_a': 0, 'c1_i': 1, 'c1_u': 0}\n{'c1_a': 0, 'c1_h': 1, 'c1_i': 0, 'c1_u': 0}\n

    You can preserve the c2 feature by using a union:

    pp = compose.Select('c1') | preprocessing.OneHotEncoder()\npp += compose.Select('c2')\n\nfor x in X:\n    pp = pp.learn_one(x)\n    pprint(pp.transform_one(x))\n
    {'c1_u': 1, 'c2': 'd'}\n{'c1_a': 1, 'c1_u': 0, 'c2': 'x'}\n{'c1_a': 0, 'c1_i': 1, 'c1_u': 0, 'c2': 'h'}\n{'c1_a': 0, 'c1_h': 1, 'c1_i': 0, 'c1_u': 0, 'c2': 'e'}\n

    Similar to the above examples, we can also pass values as a list. This will one-hot encode all of the entries individually.

    X = [{'c1': ['u', 'a'], 'c2': ['d']},\n    {'c1': ['a', 'b'], 'c2': ['x']},\n    {'c1': ['i'], 'c2': ['h', 'z']},\n    {'c1': ['h', 'b'], 'c2': ['e']}]\n\noh = preprocessing.OneHotEncoder(drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_a': 1, 'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c1_b': 1, 'c2_x': 1}\n{'c1_i': 1, 'c2_h': 1, 'c2_z': 1}\n{'c1_b': 1, 'c1_h': 1, 'c2_e': 1}\n

    Processing mini-batches is also possible.

    from pprint import pprint\nimport random\nimport string\n\nimport pandas as pd\n\nrandom.seed(42)\nalphabet = list(string.ascii_lowercase)\nX = pd.DataFrame(\n    {\n        'c1': random.choice(alphabet),\n        'c2': random.choice(alphabet),\n    }\n    for _ in range(3)\n)\nX\n
      c1 c2\n0  u  d\n1  a  x\n2  i  h\n

    oh = preprocessing.OneHotEncoder(drop_zeros=True)\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_a  c1_i  c1_u  c2_d  c2_h  c2_x\n0     0     0     1     1     0     0\n1     1     0     0     0     0     1\n2     0     1     0     0     1     0\n

    oh = preprocessing.OneHotEncoder(drop_zeros=True, drop_first=True)\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_i  c1_u  c2_d  c2_h  c2_x\n0     0     1     1     0     0\n1     0     0     0     0     1\n2     1     0     0     1     0\n

    Here's an example where the zeros are kept:

    oh = preprocessing.OneHotEncoder(drop_zeros=False)\nX_init = pd.DataFrame([{\"c1\": \"Oranges\", \"c2\": \"Apples\"}])\noh = oh.learn_many(X_init)\noh = oh.learn_many(X)\n\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_Oranges  c1_a  c1_i  c1_u  c2_Apples  c2_d  c2_h  c2_x\n0           0     0     0     1          0     1     0     0\n1           0     1     0     0          0     0     0     1\n2           0     0     1     0          0     0     1     0\n

    df.dtypes.sort_index()\n
    c1_Oranges    Sparse[uint8, 0]\nc1_a          Sparse[uint8, 0]\nc1_i          Sparse[uint8, 0]\nc1_u          Sparse[uint8, 0]\nc2_Apples     Sparse[uint8, 0]\nc2_d          Sparse[uint8, 0]\nc2_h          Sparse[uint8, 0]\nc2_x          Sparse[uint8, 0]\ndtype: object\n

    "},{"location":"api/preprocessing/OneHotEncoder/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_many step can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/OrdinalEncoder/","title":"OrdinalEncoder","text":"

    Ordinal encoder.

    This transformer maps each feature's values to integers. It can be useful when a feature has string values (i.e. categorical variables).

    "},{"location":"api/preprocessing/OrdinalEncoder/#parameters","title":"Parameters","text":"
    • unknown_value

      Type \u2192 int | None

      Default \u2192 0

      The value to use for unknown categories seen during transform_one. Unknown categories will be mapped to an integer once they are seen during learn_one. This value can be set to None in order to map categories to None if they've never been seen before.

    • none_value

      Type \u2192 int

      Default \u2192 -1

      The value to encode None with.

    "},{"location":"api/preprocessing/OrdinalEncoder/#attributes","title":"Attributes","text":"
    • categories

      A dict of dicts. The outer dict maps each feature to its inner dict. The inner dict maps each category to its code.

    "},{"location":"api/preprocessing/OrdinalEncoder/#examples","title":"Examples","text":"

    from river import preprocessing\n\nX = [\n    {\"country\": \"France\", \"place\": \"Taco Bell\"},\n    {\"country\": None, \"place\": None},\n    {\"country\": \"Sweden\", \"place\": \"Burger King\"},\n    {\"country\": \"France\", \"place\": \"Burger King\"},\n    {\"country\": \"Russia\", \"place\": \"Starbucks\"},\n    {\"country\": \"Russia\", \"place\": \"Starbucks\"},\n    {\"country\": \"Sweden\", \"place\": \"Taco Bell\"},\n    {\"country\": None, \"place\": None},\n]\n\nencoder = preprocessing.OrdinalEncoder()\nfor x in X:\n    print(encoder.transform_one(x))\n    encoder = encoder.learn_one(x)\n
    {'country': 0, 'place': 0}\n{'country': -1, 'place': -1}\n{'country': 0, 'place': 0}\n{'country': 1, 'place': 2}\n{'country': 0, 'place': 0}\n{'country': 3, 'place': 3}\n{'country': 2, 'place': 1}\n{'country': -1, 'place': -1}\n

    import pandas as pd\n\nxb1 = pd.DataFrame(X[0:4], index=[0, 1, 2, 3])\nxb2 = pd.DataFrame(X[4:8], index=[4, 5, 6, 7])\n\nencoder = preprocessing.OrdinalEncoder()\nencoder.transform_many(xb1)\n
       country  place\n0        0      0\n1       -1     -1\n2        0      0\n3        0      0\n

    encoder = encoder.learn_many(xb1)\nencoder.transform_many(xb2)\n
       country  place\n4        0      0\n5        0      0\n6        2      1\n7       -1     -1\n
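
    As described in the parameters above, setting unknown_value to None passes never-before-seen categories through as None instead of 0; a small sketch of that behavior:

    encoder = preprocessing.OrdinalEncoder(unknown_value=None)\nencoder = encoder.learn_one({'country': 'France'})\nencoder.transform_one({'country': 'Spain'})\n
    {'country': None}\n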

    "},{"location":"api/preprocessing/OrdinalEncoder/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_many step can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 defaults to None

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/PredClipper/","title":"PredClipper","text":"

    Clips the target after predicting.

    "},{"location":"api/preprocessing/PredClipper/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regressor model for which to clip the predictions.

    • y_min

      Type \u2192 float

      Minimum value predictions are clipped to.

    • y_max

      Type \u2192 float

      Maximum value predictions are clipped to.

    "},{"location":"api/preprocessing/PredClipper/#examples","title":"Examples","text":"

    from river import linear_model\nfrom river import preprocessing\n\ndataset = (\n    ({'a': 2, 'b': 4}, 80),\n    ({'a': 3, 'b': 5}, 100),\n    ({'a': 4, 'b': 6}, 120)\n)\n\nmodel = preprocessing.PredClipper(\n    regressor=linear_model.LinearRegression(),\n    y_min=0,\n    y_max=200\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'a': -100, 'b': -200})\n
    0\n

    model.predict_one({'a': 50, 'b': 60})\n
    200\n
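
    The clipping itself is the usual composition of min and max. A one-line sketch of what happens to the wrapped regressor's raw prediction:

    def clip(y_pred, y_min, y_max):\n    # Raw predictions below y_min or above y_max are saturated.\n    return min(max(y_pred, y_min), y_max)\n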

    "},{"location":"api/preprocessing/PredClipper/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    "},{"location":"api/preprocessing/PreviousImputer/","title":"PreviousImputer","text":"

    Imputes missing values by using the most recent value.

    "},{"location":"api/preprocessing/PreviousImputer/#examples","title":"Examples","text":"

    from river import preprocessing\n\nimputer = preprocessing.PreviousImputer()\n\nimputer = imputer.learn_one({'x': 1, 'y': 2})\nimputer.transform_one({'y': None})\n
    {'y': 2}\n

    imputer.transform_one({'x': None})\n
    {'x': 1}\n

    "},{"location":"api/preprocessing/PreviousImputer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/RobustScaler/","title":"RobustScaler","text":"

    Scale features using statistics that are robust to outliers.

    This scaler removes the running median and scales the data according to the inter-quantile range (by default, the interquartile range).

    "},{"location":"api/preprocessing/RobustScaler/#parameters","title":"Parameters","text":"
    • with_centering

      Default \u2192 True

      Whether to center the data before scaling.

    • with_scaling

      Default \u2192 True

      Whether to scale the data according to the inter-quantile range.

    • q_inf

      Default \u2192 0.25

      Desired inferior quantile, must be between 0 and 1.

    • q_sup

      Default \u2192 0.75

      Desired superior quantile, must be between 0 and 1.

    "},{"location":"api/preprocessing/RobustScaler/#attributes","title":"Attributes","text":"
    • median (dict)

      Mapping between features and instances of stats.Quantile(0.5).

    • iqr (dict)

      Mapping between features and instances of stats.IQR.

    "},{"location":"api/preprocessing/RobustScaler/#examples","title":"Examples","text":"

    from pprint import pprint\nimport random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\npprint(X)\n
    [{'x': 10.557707},\n    {'x': 8.100043},\n    {'x': 9.100117},\n    {'x': 8.892842},\n    {'x': 10.945884}]\n

    scaler = preprocessing.RobustScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0}\n{'x': -1.0}\n{'x': 0.0}\n{'x': -0.12449923287875722}\n{'x': 1.1086595155704708}\n
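
    In other words, each value is transformed as (x - median) / IQR using running estimates of the quantiles. A minimal static sketch of the formula, assuming the median and IQR are already known:

    def robust_scale(x, median, iqr, with_centering=True, with_scaling=True):\n    if with_centering:\n        x = x - median  # remove the median\n    if with_scaling:\n        x = x / iqr  # scale by the inter-quantile range\n    return x\n\nrobust_scale(12.0, median=10.0, iqr=4.0)  # (12 - 10) / 4 = 0.5\n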

    "},{"location":"api/preprocessing/RobustScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/SparseRandomProjector/","title":"SparseRandomProjector","text":"

    Sparse random projector.

    This transformer reduces the dimensionality of inputs by projecting them onto a sparse random projection matrix.

    Ping Li et al. recommend using a minimum density of 1 / sqrt(n_features). The transformer is not aware of how many features will be seen, so the user must specify the density manually.

    "},{"location":"api/preprocessing/SparseRandomProjector/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

      Number of components to project the data onto.

    • density

      Default \u2192 0.1

      Density of the random projection matrix. The density is defined as the ratio of non-zero components in the matrix. It is equal to 1 - sparsity.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/preprocessing/SparseRandomProjector/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = preprocessing.SparseRandomProjector(\n    n_components=3,\n    seed=42\n)\n\nfor x, y in dataset:\n    x = model.transform_one(x)\n    print(x)\n    break\n
    {0: 92.89572746525327, 1: 1344540.5692342375, 2: 0}\n

    model = (\n    preprocessing.SparseRandomProjector(\n        n_components=5,\n        seed=42\n    ) |\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression()\n)\nevaluate.progressive_val_score(dataset, model, metrics.MAE())\n
    MAE: 1.292572\n
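
    Since the transformer cannot know how many features will arrive, the Ping Li et al. rule has to be applied by hand. For instance, assuming a stream known to carry 100 features:

    import math\n\nn_features = 100  # assumed to be known ahead of time for this stream\ndensity = 1 / math.sqrt(n_features)  # 0.1, the recommended minimum density\n\nmodel = preprocessing.SparseRandomProjector(n_components=5, density=density, seed=42)\n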

    "},{"location":"api/preprocessing/SparseRandomProjector/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. D. Achlioptas. 2003. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. Journal of Computer and System Sciences 66 (2003) 671-687\u00a0\u21a9

    2. Ping Li, Trevor J. Hastie, and Kenneth W. Church. 2006. Very sparse random projections. In Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD'06). ACM, New York, NY, USA, 287-296.\u00a0\u21a9

    "},{"location":"api/preprocessing/StandardScaler/","title":"StandardScaler","text":"

    Scales the data so that it has zero mean and unit variance.

    Under the hood, a running mean and a running variance are maintained. The scaling is slightly different than when scaling the data in batch because the exact means and variances are not known in advance. However, this doesn't have a detrimental impact on performance in the long run.

    This transformer supports mini-batches as well as single instances. In the mini-batch case, the number of columns and the ordering of the columns are allowed to change between subsequent calls. In other words, this transformer will keep working even if you add and/or remove features every time you call learn_many and transform_many.

    "},{"location":"api/preprocessing/StandardScaler/#parameters","title":"Parameters","text":"
    • with_std

      Default \u2192 True

      Whether or not each feature should be divided by its standard deviation.

    "},{"location":"api/preprocessing/StandardScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12), 'y': random.uniform(8, 12)} for _ in range(6)]\nfor x in X:\n    print(x)\n
    {'x': 10.557, 'y': 8.100}\n{'x': 9.100, 'y': 8.892}\n{'x': 10.945, 'y': 10.706}\n{'x': 11.568, 'y': 8.347}\n{'x': 9.687, 'y': 8.119}\n{'x': 8.874, 'y': 10.021}\n

    scaler = preprocessing.StandardScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0, 'y': 0.0}\n{'x': -0.999, 'y': 0.999}\n{'x': 0.937, 'y': 1.350}\n{'x': 1.129, 'y': -0.651}\n{'x': -0.776, 'y': -0.729}\n{'x': -1.274, 'y': 0.992}\n

    This transformer also supports mini-batch updates. You can call learn_many and provide a pandas.DataFrame:

    import pandas as pd\nX = pd.DataFrame.from_dict(X)\n\nscaler = preprocessing.StandardScaler()\nscaler = scaler.learn_many(X[:3])\nscaler = scaler.learn_many(X[3:])\n

    You can then call transform_many to scale a mini-batch of features:

    scaler.transform_many(X)\n
        x         y\n0  0.444600 -0.933384\n1 -1.044259 -0.138809\n2  0.841106  1.679208\n3  1.477301 -0.685117\n4 -0.444084 -0.914195\n5 -1.274664  0.992296\n
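
    The running mean and variance follow Welford's method, referenced below. As a rough sketch of the single-instance update for one feature (this mirrors the idea rather than this class's exact code):

    n, mean, m2 = 0, 0.0, 0.0\n\nfor v in [10.557, 9.100, 10.945]:\n    # Welford's online update of the mean and the sum of squared differences.\n    n += 1\n    delta = v - mean\n    mean += delta / n\n    m2 += delta * (v - mean)\n\nvariance = m2 / n  # population variance after n observations\n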

    "},{"location":"api/preprocessing/StandardScaler/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    Note that the update formulas for mean and variance are slightly different than in the single instance case, but they produce exactly the same result.

    Parameters

    • X \u2014 'pd.DataFrame'

    learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Scale a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Welford's Method (and Friends) \u21a9

    2. Batch updates for simple statistics \u21a9

    "},{"location":"api/preprocessing/StatImputer/","title":"StatImputer","text":"

    Replaces missing values with a statistic.

    This transformer allows you to replace missing values with the value of a running statistic. During a call to learn_one, for each feature, a statistic is updated whenever a numeric feature is observed. When transform_one is called, each feature with a None value is replaced with the current value of the corresponding statistic.

    "},{"location":"api/preprocessing/StatImputer/#parameters","title":"Parameters","text":"
    • imputers

      A list of tuples where each tuple has two elements. The first element is a feature name and the second is an instance of stats.base.Univariate. The second element can also be an arbitrary value, such as -1, in which case the missing values will be replaced with it.

    "},{"location":"api/preprocessing/StatImputer/#examples","title":"Examples","text":"
    from river import preprocessing\nfrom river import stats\n

    For numeric data, we can use a stats.Mean() to replace missing values with the running average of the previously seen values:

    X = [\n    {'temperature': 1},\n    {'temperature': 8},\n    {'temperature': 3},\n    {'temperature': None},\n    {'temperature': 4}\n]\n\nimp = preprocessing.StatImputer(('temperature', stats.Mean()))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'temperature': 1}\n{'temperature': 8}\n{'temperature': 3}\n{'temperature': 4.0}\n{'temperature': 4}\n

    For discrete/categorical data, a common practice is to use stats.Mode to replace missing values with the most commonly seen value:

    X = [\n    {'weather': 'sunny'},\n    {'weather': 'rainy'},\n    {'weather': 'sunny'},\n    {'weather': None},\n    {'weather': 'rainy'},\n    {'weather': 'rainy'},\n    {'weather': None}\n]\n\nimp = preprocessing.StatImputer(('weather', stats.Mode()))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'sunny'}\n{'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n

    You can also choose to replace missing values with a constant value, like so:

    imp = preprocessing.StatImputer(('weather', 'missing'))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'sunny'}\n{'weather': 'missing'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n{'weather': 'missing'}\n

    Multiple imputers can be defined by providing a tuple for each feature which you want to impute:

    X = [\n    {'weather': 'sunny', 'temperature': 8},\n    {'weather': 'rainy', 'temperature': 3},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': None, 'temperature': 4},\n    {'weather': 'snowy', 'temperature': -4},\n    {'weather': 'snowy', 'temperature': -3},\n    {'weather': 'snowy', 'temperature': -3},\n    {'weather': None, 'temperature': None}\n]\n\nimp = preprocessing.StatImputer(\n    ('temperature', stats.Mean()),\n    ('weather', stats.Mode())\n)\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny', 'temperature': 8}\n{'weather': 'rainy', 'temperature': 3}\n{'weather': 'sunny', 'temperature': 5.5}\n{'weather': 'sunny', 'temperature': 4}\n{'weather': 'snowy', 'temperature': -4}\n{'weather': 'snowy', 'temperature': -3}\n{'weather': 'snowy', 'temperature': -3}\n{'weather': 'snowy', 'temperature': 0.8333}\n

    A more sophisticated way to go about imputation is to condition the statistics on a given feature. For instance, we might want to replace a missing temperature with the average temperature of a particular weather condition. As an example, consider the following dataset where the temperature is missing, but not the weather condition:

    X = [\n    {'weather': 'sunny', 'temperature': 8},\n    {'weather': 'rainy', 'temperature': 3},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': 'rainy', 'temperature': 4},\n    {'weather': 'sunny', 'temperature': 10},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': 'sunny', 'temperature': 12},\n    {'weather': 'rainy', 'temperature': None}\n]\n

    Each missing temperature can be replaced with the average temperature of the corresponding weather condition, like so:

    from river import compose\n\nimp = compose.Grouper(\n    preprocessing.StatImputer(('temperature', stats.Mean())),\n    by='weather'\n)\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny', 'temperature': 8}\n{'weather': 'rainy', 'temperature': 3}\n{'weather': 'sunny', 'temperature': 8.0}\n{'weather': 'rainy', 'temperature': 4}\n{'weather': 'sunny', 'temperature': 10}\n{'weather': 'sunny', 'temperature': 9.0}\n{'weather': 'sunny', 'temperature': 12}\n{'weather': 'rainy', 'temperature': 3.5}\n

    Note that you can also create a Grouper with the * operator:

    imp = preprocessing.StatImputer(('temperature', stats.Mean())) * 'weather'\n
    "},{"location":"api/preprocessing/StatImputer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. Transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/","title":"TargetMinMaxScaler","text":"

    Applies min-max scaling to the target.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetMinMaxScaler(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15)\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 2.018905\n

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/preprocessing/TargetStandardScaler/","title":"TargetStandardScaler","text":"

    Applies standard scaling to the target.

    "},{"location":"api/preprocessing/TargetStandardScaler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    "},{"location":"api/preprocessing/TargetStandardScaler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetStandardScaler(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15)\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 2.005999\n
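
    The idea is to standardize y with running statistics before handing it to the wrapped regressor, and to invert the scaling at prediction time. A minimal sketch of that pattern, assuming the target's running mean and standard deviation are tracked elsewhere:

    def scale_target(y, y_mean, y_std):\n    # Standardize the target before the wrapped regressor learns from it.\n    return (y - y_mean) / y_std if y_std else 0.0\n\ndef unscale_prediction(y_pred, y_mean, y_std):\n    # Map the regressor's output back to the original target scale.\n    return y_pred * y_std + y_mean\n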

    "},{"location":"api/preprocessing/TargetStandardScaler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/proba/Beta/","title":"Beta","text":"

    Beta distribution for binary data.

    A Beta distribution is very similar to a Bernoulli distribution in that it counts occurrences of boolean events. The difference lies in what is being measured. A Bernoulli distribution models the probability of an event occurring, whereas a Beta distribution models the probability distribution itself. In other words, it's a probability distribution over probability distributions.

    "},{"location":"api/proba/Beta/#parameters","title":"Parameters","text":"
    • alpha

      Type \u2192 int

      Default \u2192 1

      Initial alpha parameter.

    • beta

      Type \u2192 int

      Default \u2192 1

      Initial beta parameter.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Beta/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/Beta/#examples","title":"Examples","text":"

    from river import proba\n\nsuccesses = 81\nfailures = 219\nbeta = proba.Beta(successes, failures)\n\nbeta(.21), beta(.35)\n
    (0.867..., 0.165...)\n

    for success in range(100):\n    beta = beta.update(True)\nfor failure in range(200):\n    beta = beta.update(False)\n\nbeta(.21), beta(.35)\n
    (2.525...e-05, 0.841...)\n

    beta.cdf(.35)\n
    0.994168...\n
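
    Updating a Beta distribution is a simple conjugate update: each success increments alpha and each failure increments beta. The example above therefore amounts to the following arithmetic:

    alpha, beta_params = 81, 219\n\n# 100 observed successes and 200 observed failures.\nalpha += 100\nbeta_params += 200\n\n# The mean of a Beta(alpha, beta) distribution is alpha / (alpha + beta).\nmean = alpha / (alpha + beta_params)  # 181 / 600 = 0.3016...\n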

    "},{"location":"api/proba/Beta/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • p \u2014 'float'

    cdf

    Cumulative density function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'

    1. What is the intuition behind beta distribution? \u21a9

    "},{"location":"api/proba/Gaussian/","title":"Gaussian","text":"

    Normal distribution with parameters mu and sigma.

    "},{"location":"api/proba/Gaussian/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Gaussian/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • mu

    • n_samples

      The number of observed samples.

    • sigma

    "},{"location":"api/proba/Gaussian/#examples","title":"Examples","text":"

    from river import proba\n\np = proba.Gaussian().update(6).update(7)\n\np\n
    \ud835\udca9(\u03bc=6.500, \u03c3=0.707)\n

    p(6.5)\n
    0.564189\n

    p.revert(7)\n
    \ud835\udca9(\u03bc=6.000, \u03c3=0.000)\n

    "},{"location":"api/proba/Gaussian/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    cdf

    Cumulative density function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'
    • w \u2014 defaults to 1.0

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'
    • w \u2014 defaults to 1.0

    "},{"location":"api/proba/Multinomial/","title":"Multinomial","text":"

    Multinomial distribution for categorical data.

    "},{"location":"api/proba/Multinomial/#parameters","title":"Parameters","text":"
    • events

      Type \u2192 dict | list | None

      Default \u2192 None

      An optional list of events that already occurred.

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Multinomial/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/Multinomial/#examples","title":"Examples","text":"

    from river import proba\n\np = proba.Multinomial(['green'] * 3)\np = p.update('red')\n\np('red')\n
    0.25\n

    p = p.update('red').update('red')\np('green')\n
    0.5\n

    p = p.revert('red').revert('red')\np('red')\n
    0.25\n

    You can wrap this with a utils.Rolling to measure a distribution over a window:

    from river import utils\n\nX = ['red', 'green', 'green', 'blue', 'blue']\n\ndist = utils.Rolling(\n    proba.Multinomial(),\n    window_size=3\n)\n\nfor x in X:\n    dist = dist.update(x)\n    print(dist)\n    print()\n
    P(red) = 1.000\n<BLANKLINE>\nP(red) = 0.500\nP(green) = 0.500\n<BLANKLINE>\nP(green) = 0.667\nP(red) = 0.333\n<BLANKLINE>\nP(green) = 0.667\nP(blue) = 0.333\nP(red) = 0.000\n<BLANKLINE>\nP(blue) = 0.667\nP(green) = 0.333\nP(red) = 0.000\n<BLANKLINE>\n

    You can wrap this with a utils.Rolling to measure a distribution over a window of time:

    import datetime as dt\n\nX = ['red', 'green', 'green', 'blue']\ndays = [1, 2, 3, 4]\n\ndist = utils.TimeRolling(\n    proba.Multinomial(),\n    period=dt.timedelta(days=2)\n)\n\nfor x, day in zip(X, days):\n    dist = dist.update(x, t=dt.datetime(2019, 1, day))\n    print(dist)\n    print()\n
    P(red) = 1.000\n<BLANKLINE>\nP(red) = 0.500\nP(green) = 0.500\n<BLANKLINE>\nP(green) = 1.000\nP(red) = 0.000\n<BLANKLINE>\nP(green) = 0.500\nP(blue) = 0.500\nP(red) = 0.000\n<BLANKLINE>\n

    "},{"location":"api/proba/Multinomial/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'typing.Hashable'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'typing.Hashable'

    "},{"location":"api/proba/MultivariateGaussian/","title":"MultivariateGaussian","text":"

    Multivariate normal distribution with parameters mu and var.

    "},{"location":"api/proba/MultivariateGaussian/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/MultivariateGaussian/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • mu

      The mean value of the distribution.

    • n_samples

      The number of observed samples.

    • sigma

      The standard deviation of the distribution.

    • var

      The variance of the distribution.

    "},{"location":"api/proba/MultivariateGaussian/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import proba\n\nnp.random.seed(42)\nX = pd.DataFrame(\n    np.random.random((8, 3)),\n    columns=[\"red\", \"green\", \"blue\"]\n)\nX\n
            red     green      blue\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n5  0.183405  0.304242  0.524756\n6  0.431945  0.291229  0.611853\n7  0.139494  0.292145  0.366362\n

    p = proba.MultivariateGaussian(seed=42)\np.n_samples\n
    0.0\n

    for x in X.to_dict(orient=\"records\"):\n    p = p.update(x)\np.var\n
               blue     green       red\nblue   0.076119  0.020292 -0.010128\ngreen  0.020292  0.112931 -0.053268\nred   -0.010128 -0.053268  0.078961\n

    Retrieving the current state in a readable format is simple:

    p\n
    \ud835\udca9(\n    \u03bc=(0.518, 0.387, 0.416),\n    \u03c3^2=(\n        [ 0.076  0.020 -0.010]\n        [ 0.020  0.113 -0.053]\n        [-0.010 -0.053  0.079]\n    )\n)\n

    To retrieve the number of samples and the mode:

    p.n_samples\n
    8.0\n
    p.mode\n
    {'blue': 0.5179..., 'green': 0.3866..., 'red': 0.4158...}\n

    To retrieve the PDF and CDF:

    p(x)\n
    0.97967...\n
    p.cdf(x)\n
    0.00787...\n

    To sample data from the distribution:

    p.sample()\n
    {'blue': -0.179..., 'green': -0.051..., 'red': 0.376...}\n

    MultivariateGaussian works with utils.Rolling:

    from river import utils\n\np = utils.Rolling(proba.MultivariateGaussian(), window_size=5)\nfor x in X.to_dict(orient=\"records\"):\n    p = p.update(x)\np.var\n
               blue     green       red\nblue   0.087062 -0.022873  0.007765\ngreen -0.022873  0.014279 -0.025181\nred    0.007765 -0.025181  0.095066\n

    MultivariateGaussian works with utils.TimeRolling:

    from datetime import datetime as dt, timedelta as td\nX.index = [dt(2023, 3, 28, 0, 0, 0) + td(seconds=x) for x in range(8)]\np = utils.TimeRolling(proba.MultivariateGaussian(), period=td(seconds=5))\nfor t, x in X.iterrows():\n    p = p.update(x.to_dict(), t=t)\np.var\n
               blue     green       red\nblue   0.087062 -0.022873  0.007765\ngreen -0.022873  0.014279 -0.025181\nred    0.007765 -0.025181  0.095066\n

    The variance on the diagonal is consistent with proba.Gaussian.

    multi = proba.MultivariateGaussian()\nsingle = proba.Gaussian()\nfor x in X.to_dict(orient='records'):\n    multi = multi.update(x)\n    single = single.update(x['blue'])\nmulti.mu['blue'] == single.mu\n
    True\n
    multi.sigma['blue']['blue'] == single.sigma\n
    True\n

    "},{"location":"api/proba/MultivariateGaussian/#methods","title":"Methods","text":"call

    PDF(x) method.

    Parameters

    • x \u2014 'dict[str, float]'

    cdf

    Cumulative density function, i.e. P(X <= x).

    Parameters

    • x \u2014 'dict[str, float]'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'dict[str, float]'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'dict[str, float]'

    "},{"location":"api/proba/base/BinaryDistribution/","title":"BinaryDistribution","text":"

    A probability distribution for binary values.

    "},{"location":"api/proba/base/BinaryDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/BinaryDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/BinaryDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'bool'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'bool'

    "},{"location":"api/proba/base/ContinuousDistribution/","title":"ContinuousDistribution","text":"

    A probability distribution for continuous values.

    "},{"location":"api/proba/base/ContinuousDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/ContinuousDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/ContinuousDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    cdf

    Cumulative density function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'

    "},{"location":"api/proba/base/DiscreteDistribution/","title":"DiscreteDistribution","text":"

    A probability distribution for discrete values.

    "},{"location":"api/proba/base/DiscreteDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/DiscreteDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/DiscreteDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'typing.Hashable'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'typing.Hashable'

    "},{"location":"api/proba/base/Distribution/","title":"Distribution","text":"

    General distribution.

    "},{"location":"api/proba/base/Distribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/Distribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/Distribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    sample

    Sample a random value from the distribution.

    "},{"location":"api/reco/Baseline/","title":"Baseline","text":"

    Baseline for recommender systems.

    A first-order approximation of the biases involved in the target. The model equation is defined as:

    \\[\\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i}\\]

    Where \\(bu_{u}\\) and \\(bi_{i}\\) are respectively the user and item biases.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/Baseline/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/Baseline/#attributes","title":"Attributes","text":"
    • global_mean (stats.Mean)

      The target arithmetic mean.

    • u_biases (collections.defaultdict)

      The user bias weights.

    • i_biases (collections.defaultdict)

      The item bias weights.

    • u_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user bias weights.

    • i_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item bias weights.

    "},{"location":"api/reco/Baseline/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.Baseline(optimizer=optim.SGD(0.005))\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.538120\n

    "},{"location":"api/reco/Baseline/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None
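
    For instance, a minimal sketch of rank, reusing the model fitted in the example above (the candidate set is illustrative):

    # Reusing the model fitted in the example above; the candidate set is
    # illustrative. Items come back ordered from most to least preferred.
    model.rank(user='Bob', items={'Superman', 'Terminator', 'Notting Hill'})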

    1. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/BiasedMF/","title":"BiasedMF","text":"

    Biased Matrix Factorization for recommender systems.

    The model equation is defined as:

    \[\hat{y}(x) = \bar{y} + b_{u} + b_{i} + \langle \mathbf{v}_u, \mathbf{v}_i \rangle\]

    Where \(b_{u}\) and \(b_{i}\) are respectively the user and item biases. The last term is simply the dot product between the latent vectors of the given user-item pair:

    \\[\\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f}\\]

    where \\(k\\) is the number of latent factors.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/BiasedMF/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • bias_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the bias weights.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent weights.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2_bias

      Default \u2192 0.0

      Amount of L2 regularization used to push bias weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/BiasedMF/#attributes","title":"Attributes","text":"
    • global_mean (stats.Mean)

      The target arithmetic mean.

    • u_biases (collections.defaultdict)

      The user bias weights.

    • i_biases (collections.defaultdict)

      The item bias weights.

    • u_latents (collections.defaultdict)

      The user latent vectors randomly initialized.

    • i_latents (collections.defaultdict)

      The item latent vectors randomly initialized.

    • u_bias_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user bias weights.

    • i_bias_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item bias weights.

    • u_latent_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user latent weights.

    • i_latent_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item latent weights.

    "},{"location":"api/reco/BiasedMF/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.BiasedMF(\n    n_factors=10,\n    bias_optimizer=optim.SGD(0.025),\n    latent_optimizer=optim.SGD(0.025),\n    latent_initializer=optim.initializers.Normal(mu=0., sigma=0.1, seed=71)\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.489025\n

    "},{"location":"api/reco/BiasedMF/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Paterek, A., 2007, August. Improving regularized singular value decomposition for collaborative filtering. In Proceedings of KDD cup and workshop (Vol. 2007, pp. 5-8) \u21a9

    2. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/FunkMF/","title":"FunkMF","text":"

    Funk Matrix Factorization for recommender systems.

    The model equation is defined as:

    \\[\\hat{y}(x) = \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f}\\]

    where \\(k\\) is the number of latent factors.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/FunkMF/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/FunkMF/#attributes","title":"Attributes","text":"
    • u_latents (collections.defaultdict)

      The user latent vectors randomly initialized.

    • i_latents (collections.defaultdict)

      The item latent vectors randomly initialized.

    • u_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user latent weights.

    • i_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item latent weights.

    "},{"location":"api/reco/FunkMF/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.FunkMF(\n    n_factors=10,\n    optimizer=optim.SGD(0.1),\n    initializer=optim.initializers.Normal(mu=0., sigma=0.1, seed=11),\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    1.866272\n

    "},{"location":"api/reco/FunkMF/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Netflix update: Try this at home \u21a9

    2. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/RandomNormal/","title":"RandomNormal","text":"

    Predicts random values sampled from a normal distribution.

    The parameters of the normal distribution are fitted with running statistics. The parameters are independent of the user, the item, and the context; they are instead fitted globally. This recommender therefore acts as a dummy model that any serious model should easily outperform.

    "},{"location":"api/reco/RandomNormal/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/RandomNormal/#attributes","title":"Attributes","text":"
    • mean

      stats.Mean

    • variance

      stats.Var

    "},{"location":"api/reco/RandomNormal/#examples","title":"Examples","text":"

    from river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.RandomNormal(seed=42)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.147299621751425\n

    "},{"location":"api/reco/RandomNormal/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    "},{"location":"api/reco/base/Ranker/","title":"Ranker","text":"

    Base class for ranking models.

    "},{"location":"api/reco/base/Ranker/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/base/Ranker/#attributes","title":"Attributes","text":"
    • is_contextual
    "},{"location":"api/reco/base/Ranker/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    "},{"location":"api/rules/AMRules/","title":"AMRules","text":"

    Adaptive Model Rules.

    AMRules¹ is a rule-based algorithm for incremental regression tasks. AMRules relies on the Hoeffding bound to build its rule set, similarly to Hoeffding Trees. The Variance-Ratio heuristic is used to evaluate rules' splits. Moreover, this rule-based regressor has additional capabilities not usually found in decision trees.

    Firstly, each created decision rule has a built-in drift detection mechanism. Every time a drift is detected, the affected decision rule is removed. In addition, AMRules' rules also have anomaly detection capabilities. After a warm-up period, each rule tests whether or not the incoming instances are anomalies. Anomalous instances are not used for training.

    Every time no rule is covering an incoming example, a default rule is used to learn from it. A rule covers an instance when all of the rule's literals (tests joined by the logical operation and) match the input case. The default rule is also applied for predicting examples not covered by any rules from the rule set.

    "},{"location":"api/rules/AMRules/#parameters","title":"Parameters","text":"
    • n_min

      Type \u2192 int

      Default \u2192 200

      The total weight that must be observed by a rule between expansion attempts.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      The split test significance. The split confidence is given by 1 - delta.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      The tie-breaking threshold.

    • pred_type

      Type \u2192 str

      Default \u2192 adaptive

      The prediction strategy used by the decision rules. Can be either:

      - \"mean\": outputs the target mean within the partitions defined by the decision rules.

      - \"model\": always use instances of the model passed to pred_model to make predictions.

      - \"adaptive\": dynamically selects between \"mean\" and \"model\" for each incoming example. The most accurate option at the moment will be used.

    • pred_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The regression model that will be replicated for every rule when pred_type is either \"model\" or \"adaptive\".

    • splitter

      Type \u2192 spl.Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detection model that is used by each rule. Care must be taken to avoid triggering too many false alarms or delaying concept drift detection for too long. By default, drift.ADWIN is used if drift_detector is None.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.99

      The exponential decaying factor applied to the learning models' absolute errors, which are monitored if pred_type='adaptive'. Must be between 0 and 1. The closer to 1, the more importance past observations carry. Conversely, values approaching 0 give more weight to recently observed errors.

    • anomaly_threshold

      Type \u2192 float

      Default \u2192 -0.75

      The threshold below which instances will be considered anomalies by the rules.

    • m_min

      Type \u2192 int

      Default \u2192 30

      The minimum total weight a rule must observe before it starts to skip anomalous instances during training.

    • ordered_rule_set

      Type \u2192 bool

      Default \u2192 True

      If True, only the first rule that covers an instance will be used for training or prediction. If False, all the rules covering an instance will be updated during training, and the predictions for an instance will be the average prediction of all rules covering that example.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples each partition of a binary split candidate must have to be considered valid.

    "},{"location":"api/rules/AMRules/#attributes","title":"Attributes","text":"
    • n_drifts_detected

      The number of detected concept drifts.

    "},{"location":"api/rules/AMRules/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import drift\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    rules.AMRules(\n        delta=0.01,\n        n_min=50,\n        drift_detector=drift.ADWIN()\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.119553\n

    "},{"location":"api/rules/AMRules/#methods","title":"Methods","text":"anomaly_score

    Aggregated anomaly score computed using all the rules that cover the input instance.

    Returns the mean anomaly score, the standard deviation of the score, and the proportion of rules that cover the instance (support). If the support is zero, it means that the default rule was used (no other rule covered x).

    Parameters

    • x

    Returns

    tuple[float, float, float]: mean_anomaly_score, std_anomaly_score, support

    debug_one

    Return an explanation of how x is predicted

    Parameters

    • x

    Returns

    str: A representation of the rules that cover the input and their prediction.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 'int' \u2014 defaults to 1

    Returns

    AMRules: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
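
    A minimal sketch of the inspection methods above, on a standalone AMRules instance fed with hand-crafted features (the feature names and values are illustrative):

    from river import rules

    # A standalone AMRules instance on hand-crafted features; names and
    # values are illustrative.
    model = rules.AMRules(n_min=50)

    for i in range(100):
        x = {'a': float(i), 'b': float(i % 5)}
        model = model.learn_one(x, y=float(i))

    mean_score, std_score, support = model.anomaly_score({'a': 5.0, 'b': 0.0})
    print(model.debug_one({'a': 5.0, 'b': 0.0}))  # textual view of the covering rules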

    "},{"location":"api/rules/AMRules/#notes","title":"Notes","text":"

    AMRules treats all the non-numerical inputs as nominal features. All instances of numbers.Number will be treated as continuous, even if they represent integer categories. When using nominal features, pred_type should be set to \"mean\", otherwise errors will be thrown while trying to update the underlying rules' prediction models. Prediction strategies other than \"mean\" can be used, as long as the prediction model passed to pred_model supports nominal features.

    1. Duarte, J., Gama, J. and Bifet, A., 2016. Adaptive model rules from high-speed data streams. ACM Transactions on Knowledge Discovery from Data (TKDD), 10(3), pp.1-22.\u00a0\u21a9

    "},{"location":"api/sketch/Counter/","title":"Counter","text":"

    Counting using the Count-Min Sketch (CMS) algorithm.

    Contrary to an exhaustive approach, e.g., using a collections.Counter, CMS uses a limited and fixed amount of memory. The CMS algorithm uses a sketch structure consisting of a matrix \\(w \\times d\\).

    These dimensions are obtained via:

    • \\(w = \\lceil \\frac{e}{\\epsilon} \\rceil\\), where \\(e\\) is the Euler number.

    • \\(d = \\lceil \\ln\\left(\\frac{1}{\\delta} \\right) \\rceil\\).

    Decreasing the values of \(\epsilon\) (epsilon) and \(\delta\) (delta) increases the accuracy of the algorithm, at the cost of increased memory usage. The values of w and d control the hash tables' capacity and the amount of hash collisions, respectively.
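
    As a quick sanity check of these formulas (the results match the n_slots and n_tables values shown in the example below):

    import math

    # Worked check of the dimension formulas for epsilon=0.005 and delta=0.05.
    math.ceil(math.e / 0.005)       # w = 544 slots per hash table
    math.ceil(math.log(1 / 0.05))   # d = 3 hash tables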

    CMS works by keeping d hash tables with w slots each. Elements are mapped to a slot in each hash table. These tables store the counting estimates. This implementation assumes the turnstile case described in the paper, i.e., count values and updates can be negative.

    The count values obtained by CMS are always overestimates. Suppose \\(c_i\\) and \\(\\hat{c}_i\\) are the ground truth and estimated count values, respectively, for a given element \\(i\\). CMS guarantees that \\(c_i \\le \\hat{c}_i\\) and, with probability \\(1 - \\delta\\), \\(\\hat{c}_i \\le c_i + \\epsilon||\\mathbf{c}||_1\\). In the expression, \\(||\\mathbf{c}||_1 = \\sum_i |c_i|\\).

    "},{"location":"api/sketch/Counter/#parameters","title":"Parameters","text":"
    • epsilon

      Type \u2192 float

      Default \u2192 0.1

      The approximation error parameter. With probability 1 - delta, the error in answering a query is at most epsilon times the L1 norm of the true counts.

    • delta

      Type \u2192 float

      Default \u2192 0.05

      A query estimate has a probability of 1 - delta of having an error of at most a factor of epsilon. See the CMS description above for more details.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/sketch/Counter/#attributes","title":"Attributes","text":"
    • n_slots

      The number of slots in each hash table.

    • n_tables

      The number of stored hash tables.

    "},{"location":"api/sketch/Counter/#examples","title":"Examples","text":"
    import collections\nimport math\nimport random\nfrom river import sketch\n\ncms = sketch.Counter(epsilon=0.005, seed=0)\n\nrng = random.Random(7)\n\ncounter = collections.Counter()\n

    We can check the number of slots per hash table:

    cms.n_slots\n
    544\n

    And the number of hash tables:

    cms.n_tables\n
    3\n

    Let's compare the sketch against a brute force approach:

    vals = []\nfor _ in range(10000):\n    v = rng.randint(-1000, 1000)\n    cms = cms.update(v)\n    counter[v] += 1\n    vals.append(v)\n

    Now, we can compare the estimates of CMS against the exhaustive counting strategy:

    counter[7]\n
    5\n
    cms[7]\n
    12\n
    counter[532]\n
    4\n
    cms[532]\n
    15\n

    Keep in mind that CMS is an approximate sketch algorithm. Counting estimates for unseen values might not always be reliable:

    cms[1001]\n
    9\n

    We can check the number of elements stored by each approach:

    len(counter), len(cms)\n
    (1982, 1632)\n

    And also retrieve the total sum of counts:

    cms.total()\n
    10000\n

    We can decrease the error by allocating more memory in the CMS:

    cms_a = sketch.Counter(epsilon=0.001, delta=0.01, seed=0)\nfor v in vals:\n    cms_a = cms_a.update(v)\n\ncms_a[7]\n
    5\n
    cms_a[532]\n
    4\n

    We can also obtain estimates of the dot product between two instances of sketch.Counter. This could be useful, for instance, to estimate the cosine distance between the data monitored in two different counter sketch instances. Suppose we create another CMS instance (the number of slots and hash tables must match) that monitors another sample of the same data generating process:

    cms_b = sketch.Counter(epsilon=0.001, delta=0.01, seed=7)\n\nfor _ in range(10000):\n    v = rng.randint(-1000, 1000)\n    cms_b = cms_b.update(v)\n

    Now, we can define a cosine distance function:

    def cosine_dist(cms_a, cms_b):\n    num = cms_a @ cms_b\n    den = math.sqrt(cms_a @ cms_a) * math.sqrt(cms_b @ cms_b)\n    return num / den\n

    And use it to calculate the cosine distance between the elements monitored in cms_a and cms_b:

    cosine_dist(cms_a, cms_b)\n
    0.175363...\n

    "},{"location":"api/sketch/Counter/#methods","title":"Methods","text":"total

    Return the total count.

    update
    1. Cormode, G., & Muthukrishnan, S. (2005). An improved data stream summary: the count-min sketch and its applications. Journal of Algorithms, 55(1), 58-75. \u21a9

    2. Count-Min Sketch \u21a9

    3. Hash functions family generator in Python \u21a9

    "},{"location":"api/sketch/HeavyHitters/","title":"HeavyHitters","text":"

    Find the Heavy Hitters using the Lossy Count with Forgetting factor algorithm¹.

    Keep track of the most frequent item(set)s in a data stream and apply a forgetting factor to discard previously frequent items that do not appear often anymore. This is an approximation algorithm designed to work with a limited amount of memory rather than accounting for every possible solution (which would require an unbounded memory footprint). Any hashable type can be passed as input, hence tuples or frozensets can also be monitored.

    Considering a data stream where n elements were observed so far, the Lossy Count algorithm has the following properties:

    • All item(set)s whose true frequency exceeds support * n are output. There are no false negatives;

    • No item(set) whose true frequency is less than (support - epsilon) * n is output;

    • Estimated frequencies are less than the true frequencies by at most epsilon * n.

    "},{"location":"api/sketch/HeavyHitters/#parameters","title":"Parameters","text":"
    • support

      Type \u2192 float

      Default \u2192 0.001

      The support threshold used to determine if an item is frequent. The value of support must be in \([0, 1]\). Elements whose frequency is higher than support times the number of observations seen so far are output.

    • epsilon

      Type \u2192 float

      Default \u2192 0.005

      Error parameter to control the accuracy-memory tradeoff. The value of epsilon must be in \\((0, 1]\\) and typically epsilon \\(\\ll\\) support. The smaller the epsilon, the more accurate the estimates will be, but the count sketch will have an increased memory footprint.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.999

      Forgetting factor applied to the frequency estimates to reduce the impact of old items. The value of fading_factor must be in \\((0, 1]\\).

    "},{"location":"api/sketch/HeavyHitters/#examples","title":"Examples","text":"
    import random\nimport string\nfrom river import sketch\n\nrng = random.Random(42)\nhh = sketch.HeavyHitters()\n

    We will feed the counter with printable ASCII characters:

    for _ in range(10_000):\n    hh = hh.update(rng.choice(string.printable))\n

    We can retrieve estimates of the n top elements and their frequencies. Let's try n=3:

    hh.most_common(3)\n
    [(',', 122.099142...), ('[', 116.049510...), ('W', 115.013402...)]\n

    We can also access estimates of individual elements:

    hh['A']\n
    99.483575...\n

    Unobserved elements are handled just fine:

    hh[(1, 2, 3)]\n
    0.0\n

    "},{"location":"api/sketch/HeavyHitters/#methods","title":"Methods","text":"most_common update
    1. Veloso, B., Tabassum, S., Martins, C., Espanha, R., Azevedo, R., & Gama, J. (2020). Interconnect bypass fraud detection: a case study. Annals of Telecommunications, 75(9), 583-596.\u00a0\u21a9

    "},{"location":"api/sketch/Histogram/","title":"Histogram","text":"

    Streaming histogram.

    "},{"location":"api/sketch/Histogram/#parameters","title":"Parameters","text":"
    • max_bins

      Default \u2192 256

      Maximal number of bins.

    "},{"location":"api/sketch/Histogram/#attributes","title":"Attributes","text":"
    • n

      Total number of seen values.

    "},{"location":"api/sketch/Histogram/#examples","title":"Examples","text":"

    from river import sketch\nimport numpy as np\n\nnp.random.seed(42)\n\nvalues = np.hstack((\n    np.random.normal(-3, 1, 1000),\n    np.random.normal(3, 1, 1000),\n))\n\nhist = sketch.Histogram(max_bins=15)\n\nfor x in values:\n    hist = hist.update(x)\n\nfor bin in hist:\n    print(bin)\n
    [-6.24127, -6.24127]: 1\n[-5.69689, -5.19881]: 8\n[-5.12390, -4.43014]: 57\n[-4.42475, -3.72574]: 158\n[-3.71984, -3.01642]: 262\n[-3.01350, -2.50668]: 206\n[-2.50329, -0.81020]: 294\n[-0.80954, 0.29677]: 19\n[0.40896, 0.82733]: 7\n[0.84661, 1.25147]: 24\n[1.26029, 2.30758]: 178\n[2.31081, 3.05701]: 284\n[3.05963, 3.69695]: 242\n[3.69822, 5.64434]: 258\n[6.13775, 6.19311]: 2\n

    "},{"location":"api/sketch/Histogram/#methods","title":"Methods","text":"cdf

    Cumulative distribution function.

    Parameters

    • x

    iter_cdf

    Yields CDF values for a sorted iterable of values.

    This is faster than calling cdf with many values.

    Parameters

    • X
    • verbose \u2014 defaults to False
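
    A minimal sketch of both methods, reusing the hist instance built in the example above:

    # Reusing the hist instance built in the example above.
    hist.cdf(0)                      # mass at or below 0; close to 0.5 for this data
    list(hist.iter_cdf([-3, 0, 3]))  # CDF values for a sorted iterable, in one pass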

    1. Ben-Haim, Y. and Tom-Tov, E., 2010. A streaming parallel decision tree algorithm. Journal of Machine Learning Research, 11(Feb), pp.849-872. \u21a9

    2. Go implementation \u21a9

    "},{"location":"api/sketch/Set/","title":"Set","text":"

    Approximate tracking of observed items using Bloom filters.

    Bloom filters enable using a limited amount of memory to check whether a given item was already observed in a stream. They can be used similarly to Python's built-in sets with the difference that items are not explicitly stored. For that reason, element removal and set difference are not currently supported.

    Bloom filters store a bit array and map incoming items to k index positions in that array. The selected positions are set to True. Therefore, a binary code representation is created for each item. Membership works by projecting the query item and checking whether every position of its binary code is True. If that is not the case, the item has not been observed yet. A nice property of Bloom filters is that they do not yield false negatives: unobserved items might be reported as observed, but observed items are never reported as unobserved.

    If more than one item has the same binary code, i.e., hash collisions happen, the accuracy of the Bloom filter decreases and false positives are produced. For instance, a previously unobserved item may be reported as observed. Increasing the size of the binary array and the value of k increases the filter's accuracy, as hash collisions are avoided. Nonetheless, even with an increased number of hash functions, hash collisions will frequently happen if the array capacity is too small. The length of the bit array and the number of hash functions are inferred automatically from the supplied capacity and fp_rate.
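
    Assuming the standard Bloom filter sizing formulas, the inferred values can be checked by hand; the results match the n_bits and n_hash values in the example below:

    import math

    # Worked check, assuming the standard Bloom filter sizing formulas.
    capacity, fp_rate = 100, 0.01
    n_bits = math.ceil(-capacity * math.log(fp_rate) / math.log(2) ** 2)  # 959
    n_hash = round(n_bits / capacity * math.log(2))                       # 7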

    "},{"location":"api/sketch/Set/#parameters","title":"Parameters","text":"
    • capacity

      Type \u2192 int

      Default \u2192 2048

      The maximum capacity of the Bloom filter, i.e., the maximum number of distinct items to store given the selected fp_rate.

    • fp_rate

      Type \u2192 float

      Default \u2192 0.01

      The allowed rate of false positives. The probability of obtaining a true positive is 1 - fp_rate.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/sketch/Set/#attributes","title":"Attributes","text":"
    • n_bits

      Return the size of the binary array used by the Bloom filter.

    • n_hash

      Return the number of used hash functions.

    "},{"location":"api/sketch/Set/#examples","title":"Examples","text":"
    import random\nfrom river import sketch\n\nrng = random.Random(42)\ns_set = sketch.Set(capacity=100, seed=0)\n

    We can retrieve the number of selected hash functions:

    s_set.n_hash\n
    7\n

    And the size of the binary array used by the Bloom filter:

    s_set.n_bits\n
    959\n

    We can add new items and check for membership using the same calls used by Python's standard sets:

    for _ in range(1000):\n    s_set.add(rng.randint(0, 200))\n\n1 in s_set\n
    True\n

    False positives might happen if the capacity is not large enough:

    -10 in s_set\n
    True\n

    Iterables can also be supplied to perform multiple updates with a single call to update:

    s_set = s_set.update([1, 2, 3, 4, 5, 6, 7])\n

    We can also combine instances of sketch.Set using the intersection and union operations, as long as they share the same hash functions and capacity. In other words, all their hyperparameters must match. Let's create two instances that will monitor different portions of a stream of random numbers:

    s1 = sketch.Set(seed=8)\ns2 = sketch.Set(seed=8)\n\nfor _ in range(1000):\n    s1.add(rng.randint(0, 5000))\n\nfor _ in range(1000):\n    s2.add(rng.randint(0, 5000))\n\n43 in s1\n
    True\n
    43 in s2\n
    False\n

    We can get the intersection between the two instances by using:

    s_intersection = s1 & s2\n43 in s_intersection\n
    False\n

    We can also obtain the set union:

    s_union = s1 | s2\n\n43 in s_union\n
    True\n

    The same effect of the non-inplace dunder methods can be achieved via explicit method calls:

    43 in s1.intersection(s2)\n
    False\n

    43 in s1.union(s2)\n
    True\n

    "},{"location":"api/sketch/Set/#methods","title":"Methods","text":"add intersection

    Set intersection.

    Return a new instance that results from the set intersection between the current Set object and other. Dunder operators can be used to replace the method call, i.e., a &= b and a & b for inplace and non-inplace intersections, respectively.

    Parameters

    • other \u2014 'Set'

    union

    Set union.

    Return a new instance that results from the set union between the current Set object and other. Dunder operators can be used to replace the method call, i.e., a |= b and a | b for inplace and non-inplace unions, respectively.

    Parameters

    • other \u2014 'Set'

    update"},{"location":"api/sketch/Set/#notes","title":"Notes","text":"

    This implementation uses an integer to represent the binary array. Bitwise operations are performed in the integer to reflect the Bloom filter updates.

    1. Florian Hartmann's blog article on Bloom Filters.\u00a0\u21a9

    2. Wikipedia entry on Bloom filters.\u00a0\u21a9

    "},{"location":"api/stats/AbsMax/","title":"AbsMax","text":"

    Running absolute max.

    "},{"location":"api/stats/AbsMax/#attributes","title":"Attributes","text":"
    • abs_max (float)

      The current absolute max.

    "},{"location":"api/stats/AbsMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 5, -6]\nabs_max = stats.AbsMax()\nfor x in X:\n    print(abs_max.update(x).get())\n
    1\n4\n4\n4\n5\n6\n

    "},{"location":"api/stats/AbsMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/AutoCorr/","title":"AutoCorr","text":"

    Measures the serial correlation.

    This method computes the Pearson correlation between the current value and the value seen n steps before.

    "},{"location":"api/stats/AutoCorr/#parameters","title":"Parameters","text":"
    • lag

      Type \u2192 int

    "},{"location":"api/stats/AutoCorr/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/AutoCorr/#examples","title":"Examples","text":"

    The following examples are taken from the pandas documentation.

    from river import stats\n\nauto_corr = stats.AutoCorr(lag=1)\nfor x in [0.25, 0.5, 0.2, -0.05]:\n    print(auto_corr.update(x).get())\n
    0\n0\n-1.0\n0.103552\n

    auto_corr = stats.AutoCorr(lag=2)\nfor x in [0.25, 0.5, 0.2, -0.05]:\n    print(auto_corr.update(x).get())\n
    0\n0\n0\n-1.0\n

    auto_corr = stats.AutoCorr(lag=1)\nfor x in [1, 0, 0, 0]:\n    print(auto_corr.update(x).get())\n
    0\n0\n0\n0\n

    "},{"location":"api/stats/AutoCorr/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/BayesianMean/","title":"BayesianMean","text":"

    Estimates a mean using outside information.

    "},{"location":"api/stats/BayesianMean/#parameters","title":"Parameters","text":"
    • prior

      Type \u2192 float

    • prior_weight

      Type \u2192 float

    "},{"location":"api/stats/BayesianMean/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/BayesianMean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
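
    Since no example is given above, here is a minimal sketch, assuming the standard Bayesian average formula from the references below:

    from river import stats

    # A minimal sketch, assuming the Bayesian average formula
    # (prior_weight * prior + sum of observations) / (prior_weight + n).
    bmean = stats.BayesianMean(prior=5.0, prior_weight=2.0)
    for x in [4, 4, 4]:
        bmean = bmean.update(x)

    bmean.get()  # (2 * 5 + 12) / (2 + 3) = 4.4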

    1. Additive smoothing \u21a9

    2. Bayesian average \u21a9

    3. Practical example of Bayes estimators \u21a9

    "},{"location":"api/stats/Count/","title":"Count","text":"

    A simple counter.

    "},{"location":"api/stats/Count/#attributes","title":"Attributes","text":"
    • n (int)

      The current number of observations.

    "},{"location":"api/stats/Count/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number' \u2014 defaults to None
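
    A minimal usage sketch:

    from river import stats

    # Each call to update increments the counter by one.
    count = stats.Count()
    for x in [4, 2, 7]:
        count = count.update(x)

    count.get()  # 3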

    "},{"location":"api/stats/Cov/","title":"Cov","text":"

    Covariance.

    "},{"location":"api/stats/Cov/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/stats/Cov/#attributes","title":"Attributes","text":"
    • n
    "},{"location":"api/stats/Cov/#examples","title":"Examples","text":"

    from river import stats\n\nx = [-2.1,  -1,  4.3]\ny = [   3, 1.1, 0.12]\n\ncov = stats.Cov()\n\nfor xi, yi in zip(x, y):\n    print(cov.update(xi, yi).get())\n
    0.0\n-1.044999\n-4.286\n

    This class has a revert method, and can thus be wrapped by utils.Rolling:

    from river import utils\n\nx = [-2.1,  -1, 4.3, 1, -2.1,  -1, 4.3]\ny = [   3, 1.1, .12, 1,    3, 1.1, .12]\n\nrcov = utils.Rolling(stats.Cov(), window_size=3)\n\nfor xi, yi in zip(x, y):\n    print(rcov.update(xi, yi).get())\n
    0.0\n-1.045\n-4.286\n-1.382\n-4.589\n-1.415\n-4.286\n

    "},{"location":"api/stats/Cov/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert update

    Update and return the called instance.

    Parameters

    • x
    • y
    • w \u2014 defaults to 1.0

    update_many"},{"location":"api/stats/Cov/#notes","title":"Notes","text":"

    The outcomes of the incremental and parallel updates are consistent with numpy's batch processing when \\(\\text{ddof} \\le 1\\).

    1. Wikipedia article on algorithms for calculating variance \u21a9

    2. Schubert, E. and Gertz, M., 2018, July. Numerically stable parallel computation of (co-) variance. In Proceedings of the 30th International Conference on Scientific and Statistical Database Management (pp. 1-12).\u00a0\u21a9

    "},{"location":"api/stats/EWMean/","title":"EWMean","text":"

    Exponentially weighted mean.

    "},{"location":"api/stats/EWMean/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.5

      The closer fading_factor is to 1 the more the statistic will adapt to recent values.

    "},{"location":"api/stats/EWMean/#attributes","title":"Attributes","text":"
    • mean (float)

      The running exponentially weighted mean.

    "},{"location":"api/stats/EWMean/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, 3, 5, 4, 6, 8, 7, 9, 11]\newm = stats.EWMean(fading_factor=0.5)\nfor x in X:\n    print(ewm.update(x).get())\n
    1.0\n2.0\n3.5\n3.75\n4.875\n6.4375\n6.71875\n7.859375\n9.4296875\n

    "},{"location":"api/stats/EWMean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    2. Exponential Moving Average on Streaming Data \u21a9

    "},{"location":"api/stats/EWVar/","title":"EWVar","text":"

    Exponentially weighted variance.

    To calculate the variance we use the fact that Var(X) = Mean(x^2) - Mean(x)^2, and internally we use the exponentially weighted means of x and x^2 to calculate this.
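
    A minimal sketch of that identity, using two EWMean instances side by side; the final value, 2.75, matches the third output of the example below:

    from river import stats

    # Tracking Mean(x) and Mean(x^2) separately reproduces EWVar.
    ewm_x = stats.EWMean(fading_factor=0.5)
    ewm_x2 = stats.EWMean(fading_factor=0.5)
    for x in [1, 3, 5]:
        ewm_x = ewm_x.update(x)
        ewm_x2 = ewm_x2.update(x * x)

    ewm_x2.get() - ewm_x.get() ** 2  # 15.0 - 3.5 ** 2 = 2.75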

    "},{"location":"api/stats/EWVar/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.5

      The closer fading_factor is to 1 the more the statistic will adapt to recent values.

    "},{"location":"api/stats/EWVar/#attributes","title":"Attributes","text":"
    • variance (float)

      The running exponentially weighted variance.

    "},{"location":"api/stats/EWVar/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, 3, 5, 4, 6, 8, 7, 9, 11]\newv = stats.EWVar(fading_factor=0.5)\nfor x in X:\n    print(ewv.update(x).get())\n
    0.0\n1.0\n2.75\n1.4375\n1.984375\n3.43359375\n1.7958984375\n2.198974609375\n3.56536865234375\n

    "},{"location":"api/stats/EWVar/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    2. Exponential Moving Average on Streaming Data \u21a9

    "},{"location":"api/stats/Entropy/","title":"Entropy","text":"

    Running entropy.

    "},{"location":"api/stats/Entropy/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 1

      Fading factor.

    • eps

      Default \u2192 1e-08

      Small value that will be added to the denominator to avoid division by zero.

    "},{"location":"api/stats/Entropy/#attributes","title":"Attributes","text":"
    • entropy (float)

      The running entropy.

    • n (int)

      The current number of observations.

    • counter (collections.Counter)

      Count the number of times the values have occurred

    "},{"location":"api/stats/Entropy/#examples","title":"Examples","text":"

    import math\nimport random\nimport numpy as np\nfrom scipy.stats import entropy\nfrom river import stats\n\ndef entropy_list(labels, base=None):\n  value,counts = np.unique(labels, return_counts=True)\n  return entropy(counts, base=base)\n\nSEED = 42 * 1337\nrandom.seed(SEED)\n\nentro = stats.Entropy(fading_factor=1)\n\nlist_animal = []\nfor animal, num_val in zip(['cat', 'dog', 'bird'],[301, 401, 601]):\n    list_animal += [animal for i in range(num_val)]\nrandom.shuffle(list_animal)\n\nfor animal in list_animal:\n    _ = entro.update(animal)\n\nprint(f'{entro.get():.6f}')\n
    1.058093\n
    print(f'{entropy_list(list_animal):.6f}')\n
    1.058093\n

    "},{"location":"api/stats/Entropy/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Sovdat, B., 2014. Updating Formulas and Algorithms for Computing Entropy and Gini Index from Time-Changing Data Streams. arXiv preprint arXiv:1403.6348. \u21a9

    "},{"location":"api/stats/IQR/","title":"IQR","text":"

    Computes the interquartile range.

    "},{"location":"api/stats/IQR/#parameters","title":"Parameters","text":"
    • q_inf

      Default \u2192 0.25

      Desired inferior quantile, must be between 0 and 1. Defaults to 0.25.

    • q_sup

      Default \u2192 0.75

      Desired superior quantile, must be between 0 and 1. Defaults to 0.75.

    "},{"location":"api/stats/IQR/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/IQR/#examples","title":"Examples","text":"

    from river import stats\n\niqr = stats.IQR(q_inf=0.25, q_sup=0.75)\n\nfor i in range(0, 1001):\n    iqr = iqr.update(i)\n    if i % 100 == 0:\n        print(iqr.get())\n
    0.0\n50.0\n100.0\n150.0\n200.0\n250.0\n300.0\n350.0\n400.0\n450.0\n500.0\n

    "},{"location":"api/stats/IQR/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Kurtosis/","title":"Kurtosis","text":"

    Running kurtosis using Welford's algorithm.

    "},{"location":"api/stats/Kurtosis/#parameters","title":"Parameters","text":"
    • bias

      Default \u2192 False

      If False, then the calculations are corrected for statistical bias.

    "},{"location":"api/stats/Kurtosis/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Kurtosis/#examples","title":"Examples","text":"

    from river import stats\nimport scipy.stats\nimport numpy as np\n\nnp.random.seed(42)\nX = np.random.normal(loc=0, scale=1, size=10)\n\nkurtosis = stats.Kurtosis(bias=False)\nfor x in X:\n    print(kurtosis.update(x).get())\n
    -3.0\n-2.0\n-1.5\n1.4130027920707047\n0.15367976585756438\n0.46142633246812653\n-1.620647789230658\n-1.3540178492487054\n-1.2310268787102745\n-0.9490372374384453\n

    for i in range(2, len(X)+1):\n    print(scipy.stats.kurtosis(X[:i], bias=False))\n
    -2.0\n-1.4999999999999998\n1.4130027920707082\n0.15367976585756082\n0.46142633246812403\n-1.620647789230658\n-1.3540178492487063\n-1.2310268787102738\n-0.9490372374384459\n

    kurtosis = stats.Kurtosis(bias=True)\nfor x in X:\n    print(kurtosis.update(x).get())\n
    -3.0\n-2.0\n-1.5\n-1.011599627723906\n-0.9615800585356089\n-0.6989395431537853\n-1.4252699121794408\n-1.311437071070812\n-1.246289111322894\n-1.082283689864171\n

    for i in range(2, len(X)+1):\n    print(scipy.stats.kurtosis(X[:i], bias=True))\n
    -2.0\n-1.4999999999999998\n-1.0115996277239057\n-0.9615800585356098\n-0.6989395431537861\n-1.425269912179441\n-1.3114370710708125\n-1.2462891113228936\n-1.0822836898641714\n

    "},{"location":"api/stats/Kurtosis/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Link/","title":"Link","text":"

    A link joins two univariate statistics as a sequence.

    This can be used to pipe the output of one statistic to the input of another. This can be used, for instance, to calculate the mean of the variance of a variable. It can also be used to compute shifted statistics by piping statistics with an instance of stats.Shift.

    Note that a link is not meant to be instantiated via this class definition. Instead, users can link statistics together via the | operator.
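
    A minimal sketch of the \"mean of the variance\" composition mentioned above:

    from river import stats

    # The running variance is piped into a running mean via the | operator.
    stat = stats.Var() | stats.Mean()
    for x in [1, 2, 3, 4]:
        stat = stat.update(x)

    stat.get()  # the average of the successive variance estimates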

    "},{"location":"api/stats/Link/#parameters","title":"Parameters","text":"
    • left

      Type \u2192 stats.base.Univariate

    • right

      Type \u2192 stats.base.Univariate

      The output from left's get method is passed to right's update method if left's get method doesn't produce None.

    "},{"location":"api/stats/Link/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Link/#examples","title":"Examples","text":"
    from river import stats\nstat = stats.Shift(1) | stats.Mean()\n

    No values have been seen, therefore get defaults to the initial value of stats.Mean, which is 0.

    stat.get()\n
    0.\n

    Let us now call update.

    stat = stat.update(1)\n

    The output from get will still be 0. The reason is that stats.Shift has not seen enough values, and therefore outputs its default value, which is None. The stats.Mean instance is therefore not updated.

    stat.get()\n
    0.0\n

    On the next call to update, the stats.Shift instance has seen enough values, and therefore the mean can be updated. The mean is therefore equal to 1, because that's the only value from the past.

    stat = stat.update(3)\nstat.get()\n
    1.0\n

    On the subsequent call to update, the mean will be updated with the value 3.

    stat = stat.update(4)\nstat.get()\n
    2.0\n

    Note that composing statistics returns a new statistic with its own name.

    stat.name\n
    'mean_of_shift_1'\n

    "},{"location":"api/stats/Link/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/MAD/","title":"MAD","text":"

    Median Absolute Deviation (MAD).

    The median absolute deviation is the median of the absolute differences between each data point and the data's overall median. In an online setting, the median of the data is unknown beforehand. Therefore, both the median of the data and the median of the differences of the data with respect to the latter are updated online. To be precise, the median of the data is updated before the median of the differences. As a consequence, this online version of the MAD does not coincide exactly with its batch counterpart.

    "},{"location":"api/stats/MAD/#attributes","title":"Attributes","text":"
    • median (stats.Median)

      The median of the data.

    "},{"location":"api/stats/MAD/#examples","title":"Examples","text":"

    from river import stats\n\nX = [4, 2, 5, 3, 0, 4]\n\nmad = stats.MAD()\nfor x in X:\n    print(mad.update(x).get())\n
    0.0\n2.0\n1.0\n1.0\n1.0\n1.0\n

    "},{"location":"api/stats/MAD/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Median absolute deviation article on Wikipedia \u21a9

    "},{"location":"api/stats/Max/","title":"Max","text":"

    Running max.

    "},{"location":"api/stats/Max/#attributes","title":"Attributes","text":"
    • max (float)

      The current max.

    "},{"location":"api/stats/Max/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 5, -6]\n_max = stats.Max()\nfor x in X:\n    print(_max.update(x).get())\n
    1\n1\n3\n3\n5\n5\n

    "},{"location":"api/stats/Max/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Mean/","title":"Mean","text":"

    Running mean.

    "},{"location":"api/stats/Mean/#attributes","title":"Attributes","text":"
    • n (float)

      The current sum of weights. If each passed weight was 1, then this is equal to the number of seen observations.

    "},{"location":"api/stats/Mean/#examples","title":"Examples","text":"

    from river import stats\n\nX = [-5, -3, -1, 1, 3, 5]\nmean = stats.Mean()\nfor x in X:\n    print(mean.update(x).get())\n
    -5.0\n-4.0\n-3.0\n-2.0\n-1.0\n0.0\n

    You can calculate a rolling average by wrapping a utils.Rolling around:

    from river import utils\n\nX = [1, 2, 3, 4, 5, 6]\nrmean = utils.Rolling(stats.Mean(), window_size=2)\n\nfor x in X:\n    print(rmean.update(x).get())\n
    1.0\n1.5\n2.5\n3.5\n4.5\n5.5\n

    "},{"location":"api/stats/Mean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many
    1. West, D. H. D. (1979). Updating mean and variance estimates: An improved method. Communications of the ACM, 22(9), 532-535. \u21a9

    2. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    3. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1983. Algorithms for computing the sample variance: Analysis and recommendations. The American Statistician, 37(3), pp.242-247. \u21a9

    "},{"location":"api/stats/Min/","title":"Min","text":"

    Running min.

    "},{"location":"api/stats/Min/#attributes","title":"Attributes","text":"
    • min (float)

      The current min.
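
    A minimal usage sketch, mirroring the Max example above:

    from river import stats

    X = [1, -4, 3, -2, 5, -6]
    _min = stats.Min()
    for x in X:
        print(_min.update(x).get())
    # 1, -4, -4, -4, -4, -6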

    "},{"location":"api/stats/Min/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Mode/","title":"Mode","text":"

    Running mode.

    The mode is simply the most common value. An approximate mode can be computed by counting only the first k unique values observed.

    "},{"location":"api/stats/Mode/#parameters","title":"Parameters","text":"
    • k

      Default \u2192 25

      Only the first k unique values will be included. If k equals -1, the exact mode is computed.

    "},{"location":"api/stats/Mode/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Mode/#examples","title":"Examples","text":"

    from river import stats\n\nX = ['sunny', 'cloudy', 'cloudy', 'rainy', 'rainy', 'rainy']\nmode = stats.Mode(k=2)\nfor x in X:\n    print(mode.update(x).get())\n
    sunny\nsunny\ncloudy\ncloudy\ncloudy\ncloudy\n

    mode = stats.Mode(k=-1)\nfor x in X:\n    print(mode.update(x).get())\n
    sunny\nsunny\ncloudy\ncloudy\ncloudy\nrainy\n

    "},{"location":"api/stats/Mode/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/NUnique/","title":"NUnique","text":"

    Approximate number of unique values counter.

    This is basically an implementation of the HyperLogLog algorithm. Adapted from hypy. The code is a bit too terse but it will do for now.

    "},{"location":"api/stats/NUnique/#parameters","title":"Parameters","text":"
    • error_rate

      Default \u2192 0.01

    Desired error rate. The lower the error rate, the more memory is used, since more buckets are needed to reach the desired precision.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Set the seed to produce identical results.

    "},{"location":"api/stats/NUnique/#attributes","title":"Attributes","text":"
    • n_bits (int)

    • n_buckets (int)

    • buckets (list)
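
    As background, here is a sketch of the standard HyperLogLog sizing rule relating the error rate to the bucket count; the constant 1.04 comes from the HyperLogLog paper, and the exact rounding used internally may differ:

    import math\n\n# The relative error of HyperLogLog is roughly 1.04 / sqrt(n_buckets), so a\n# desired error rate translates into a bucket count, rounded up to a power of two.\nerror_rate = 0.01\nn_bits = math.ceil(math.log2((1.04 / error_rate) ** 2))\nn_buckets = 1 << n_bits\nprint(n_bits, n_buckets)\n
    14 16384\n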

    "},{"location":"api/stats/NUnique/#examples","title":"Examples","text":"

    import string\nfrom river import stats\n\nalphabet = string.ascii_lowercase\nn_unique = stats.NUnique(error_rate=0.2, seed=42)\n\nn_unique.update('a').get()\n
    1\n

    n_unique.update('b').get()\n
    2\n

    for letter in alphabet:\n    n_unique = n_unique.update(letter)\nn_unique.get()\n
    31\n

    Lowering the error_rate parameter will increase the precision.

    n_unique = stats.NUnique(error_rate=0.01, seed=42)\nfor letter in alphabet:\n    n_unique = n_unique.update(letter)\nn_unique.get()\n
    26\n

    "},{"location":"api/stats/NUnique/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. My favorite algorithm (and data structure): HyperLogLog \u21a9

    2. Flajolet, P., Fusy, \u00c9., Gandouet, O. and Meunier, F., 2007, June. Hyperloglog: the analysis of a near-optimal cardinality estimation algorithm. \u21a9

    "},{"location":"api/stats/PeakToPeak/","title":"PeakToPeak","text":"

    Running peak to peak (max - min).

    "},{"location":"api/stats/PeakToPeak/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/PeakToPeak/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 4]\nptp = stats.PeakToPeak()\nfor x in X:\n    print(ptp.update(x).get())\n
    0\n5\n7\n7\n7\n8\n

    "},{"location":"api/stats/PeakToPeak/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/PearsonCorr/","title":"PearsonCorr","text":"

    Online Pearson correlation.

    "},{"location":"api/stats/PearsonCorr/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/stats/PearsonCorr/#attributes","title":"Attributes","text":"
    • var_x (stats.Var)

      Running variance of x.

    • var_y (stats.Var)

      Running variance of y.

    • cov_xy (stats.Cov)

      Running covariance of x and y.

    "},{"location":"api/stats/PearsonCorr/#examples","title":"Examples","text":"

    from river import stats\n\nx = [0, 0, 0, 1, 1, 1, 1]\ny = [0, 1, 2, 3, 4, 5, 6]\n\npearson = stats.PearsonCorr()\n\nfor xi, yi in zip(x, y):\n    print(pearson.update(xi, yi).get())\n
    0\n0\n0\n0.774596\n0.866025\n0.878310\n0.866025\n

    You can also do this in a rolling fashion:

    from river import utils\n\nx = [0, 0, 0, 1, 1, 1, 1]\ny = [0, 1, 2, 3, 4, 5, 6]\n\npearson = utils.Rolling(stats.PearsonCorr(), window_size=4)\n\nfor xi, yi in zip(x, y):\n    print(pearson.update(xi, yi).get())\n
    0\n0\n0\n0.7745966692414834\n0.8944271909999159\n0.7745966692414832\n-4.712160915387242e-09\n

    "},{"location":"api/stats/PearsonCorr/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x
    • y

    "},{"location":"api/stats/Quantile/","title":"Quantile","text":"

    Running quantile.

    Uses the P\u00b2 algorithm, which is also known as the \"Piecewise-Parabolic quantile estimator\". The code is inspired by LiveStats' implementation [2].

    "},{"location":"api/stats/Quantile/#parameters","title":"Parameters","text":"
    • q

      Type \u2192 float

      Default \u2192 0.5

    Determines which quantile to compute; must be between 0 and 1.

    "},{"location":"api/stats/Quantile/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Quantile/#examples","title":"Examples","text":"

    from river import stats\nimport numpy as np\n\nnp.random.seed(42 * 1337)\nmu, sigma = 0, 1\ns = np.random.normal(mu, sigma, 500)\n\nmedian = stats.Quantile(0.5)\nfor x in s:\n   _ = median.update(x)\nprint(f'The estimated value of the 50th (median) quantile is {median.get():.4f}')\n
    The estimated value of the 50th (median) quantile is -0.0275\n

    print(f'The real value of the 50th (median) quantile is {np.median(s):.4f}')\n
    The real value of the 50th (median) quantile is -0.0135\n

    percentile_17 = stats.Quantile(0.17)\nfor x in s:\n   _ = percentile_17.update(x)\nprint(f'The estimated value of the 17th quantile is {percentile_17.get():.4f}')\n
    The estimated value of the 17th quantile is -0.8652\n

    print(f'The real value of the 17th quantile is {np.percentile(s,17):.4f}')\n
    The real value of the 17th quantile is -0.9072\n

    "},{"location":"api/stats/Quantile/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. The P\u00b2 Algorithm for Dynamic Calculation of Quantiles and Histograms Without Storing Observations \u21a9

    2. LiveStats \u21a9

    3. P\u00b2 quantile estimator: estimating the median without storing values \u21a9

    "},{"location":"api/stats/RollingAbsMax/","title":"RollingAbsMax","text":"

    Running absolute max over a window.

    "},{"location":"api/stats/RollingAbsMax/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingAbsMax/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingAbsMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_absmax = stats.RollingAbsMax(window_size=2)\nfor x in X:\n    print(rolling_absmax.update(x).get())\n
    1\n4\n4\n3\n2\n2\n

    "},{"location":"api/stats/RollingAbsMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingIQR/","title":"RollingIQR","text":"

    Computes the rolling interquartile range.

    "},{"location":"api/stats/RollingIQR/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the window.

    • q_inf

      Default \u2192 0.25

    Desired lower quantile; must be between 0 and 1.

    • q_sup

      Default \u2192 0.75

    Desired upper quantile; must be between 0 and 1.

    "},{"location":"api/stats/RollingIQR/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingIQR/#examples","title":"Examples","text":"

    from river import stats\nrolling_iqr = stats.RollingIQR(\n    q_inf=0.25,\n    q_sup=0.75,\n    window_size=101\n)\n\nfor i in range(0, 1001):\n    rolling_iqr = rolling_iqr.update(i)\n    if i % 100 == 0:\n        print(rolling_iqr.get())\n
    0.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n

    "},{"location":"api/stats/RollingIQR/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMax/","title":"RollingMax","text":"

    Running max over a window.

    "},{"location":"api/stats/RollingMax/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMax/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_max = stats.RollingMax(window_size=2)\nfor x in X:\n    print(rolling_max.update(x).get())\n
    1\n1\n3\n3\n2\n2\n

    "},{"location":"api/stats/RollingMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMin/","title":"RollingMin","text":"

    Running min over a window.

    "},{"location":"api/stats/RollingMin/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMin/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingMin/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_min = stats.RollingMin(2)\nfor x in X:\n    print(rolling_min.update(x).get())\n
    1\n-4\n-4\n-2\n-2\n1\n

    "},{"location":"api/stats/RollingMin/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMode/","title":"RollingMode","text":"

    Running mode over a window.

    The mode is the most common value.

    "},{"location":"api/stats/RollingMode/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMode/#attributes","title":"Attributes","text":"
    • counts (collections.defaultdict)

      Value counts.

    "},{"location":"api/stats/RollingMode/#examples","title":"Examples","text":"

    from river import stats\n\nX = ['sunny', 'sunny', 'sunny', 'rainy', 'rainy', 'rainy', 'rainy']\nrolling_mode = stats.RollingMode(window_size=2)\nfor x in X:\n    print(rolling_mode.update(x).get())\n
    sunny\nsunny\nsunny\nsunny\nrainy\nrainy\nrainy\n

    rolling_mode = stats.RollingMode(window_size=5)\nfor x in X:\n    print(rolling_mode.update(x).get())\n
    sunny\nsunny\nsunny\nsunny\nsunny\nrainy\nrainy\n

    "},{"location":"api/stats/RollingMode/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingPeakToPeak/","title":"RollingPeakToPeak","text":"

    Running peak to peak (max - min) over a window.

    "},{"location":"api/stats/RollingPeakToPeak/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingPeakToPeak/#attributes","title":"Attributes","text":"
    • max (stats.RollingMax)

      The running rolling max.

    • min (stats.RollingMin)

      The running rolling min.

    "},{"location":"api/stats/RollingPeakToPeak/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nptp = stats.RollingPeakToPeak(window_size=2)\nfor x in X:\n    print(ptp.update(x).get())\n
    0\n5\n7\n5\n4\n1\n

    "},{"location":"api/stats/RollingPeakToPeak/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingQuantile/","title":"RollingQuantile","text":"

    Running quantile over a window.

    "},{"location":"api/stats/RollingQuantile/#parameters","title":"Parameters","text":"
    • q

      Type \u2192 float

    Determines which quantile to compute; must be between 0 and 1.

    • window_size

      Type \u2192 int

      Size of the window.

    "},{"location":"api/stats/RollingQuantile/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingQuantile/#examples","title":"Examples","text":"

    from river import stats\n\nrolling_quantile = stats.RollingQuantile(\n    q=.5,\n    window_size=101,\n)\n\nfor i in range(1001):\n    rolling_quantile = rolling_quantile.update(i)\n    if i % 100 == 0:\n        print(rolling_quantile.get())\n
    0.0\n50.0\n150.0\n250.0\n350.0\n450.0\n550.0\n650.0\n750.0\n850.0\n950.0\n

    "},{"location":"api/stats/RollingQuantile/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Left sorted \u21a9

    "},{"location":"api/stats/SEM/","title":"SEM","text":"

    Running standard error of the mean using Welford's algorithm.

    "},{"location":"api/stats/SEM/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom. The divisor used in calculations is n - ddof, where n is the number of seen elements.

    "},{"location":"api/stats/SEM/#attributes","title":"Attributes","text":"
    • n (int)

      Number of observations.

    "},{"location":"api/stats/SEM/#examples","title":"Examples","text":"

    from river import stats\n\nX = [3, 5, 4, 7, 10, 12]\n\nsem = stats.SEM()\nfor x in X:\n    print(sem.update(x).get())\n
    0.0\n1.0\n0.577350\n0.853912\n1.240967\n1.447219\n

    from river import utils\n\nX = [1, 4, 2, -4, -8, 0]\n\nrolling_sem = utils.Rolling(stats.SEM(ddof=1), window_size=3)\nfor x in X:\n    print(rolling_sem.update(x).get())\n
    0.0\n1.5\n0.881917\n2.403700\n2.905932\n2.309401\n

    "},{"location":"api/stats/SEM/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many

    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Shift/","title":"Shift","text":"

    Shifts a data stream by returning past values.

    This can be used to compute statistics over past data. For instance, if you're computing daily averages, then shifting by 7 will be equivalent to computing averages from a week ago.

    Shifting values is useful when you're calculating an average over a target value. Indeed, in this case it's important to shift the values in order not to introduce leakage. The recommended way to do this is to use feature_extraction.TargetAgg, which already takes care of shifting the target values once.
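
    As a sketch, the recommended pattern looks as follows; the by and how parameter names mirror feature_extraction.Agg, so double-check them against your version:

    from river import feature_extraction, stats\n\n# TargetAgg aggregates the target per group and shifts it by one observation\n# under the hood, which avoids leaking the current target into the features.\nagg = feature_extraction.TargetAgg(by='shop', how=stats.Mean())\n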

    "},{"location":"api/stats/Shift/#parameters","title":"Parameters","text":"
    • amount

      Default \u2192 1

      Shift amount. The get method will return the t - amount value, where t is the current moment.

    • fill_value

      Default \u2192 None

      This value will be returned by the get method if not enough values have been observed.

    "},{"location":"api/stats/Shift/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Shift/#examples","title":"Examples","text":"

    It is rare to have to use Shift by itself. A more common usage is to compose it with other statistics. This can be done via the | operator.

    from river import stats\n\nstat = stats.Shift(1) | stats.Mean()\n\nfor i in range(5):\n    stat = stat.update(i)\n    print(stat.get())\n
    0.0\n0.0\n0.5\n1.0\n1.5\n

    A common use case for Shift is computing statistics on shifted data. For instance, say you have a dataset which records the amount of sales for a set of shops. You might then have a shop field and a sales field. Let's say you want to look at the average amount of sales per shop. You can do this by using a feature_extraction.Agg. When you call transform_one, you expect it to return the average amount of sales, without including today's sales. You can do this by prepending an instance of stats.Shift to an instance of stats.Mean.

    from river import feature_extraction\n\nagg = feature_extraction.Agg(\n    on='sales',\n    how=stats.Shift(1) | stats.Mean(),\n    by='shop'\n)\n

    Let's define a little example dataset.

    X = iter([\n    {'shop': 'Ikea', 'sales': 10},\n    {'shop': 'Ikea', 'sales': 15},\n    {'shop': 'Ikea', 'sales': 20}\n])\n

    Now let's call the learn_one method to update our feature extractor.

    x = next(X)\nagg = agg.learn_one(x)\n

    At this point, the average defaults to the initial value of stats.Mean, which is 0.

    agg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 0.0}\n

    We can now update our feature extractor with the next data point and check the output.

    agg = agg.learn_one(next(X))\nagg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 10.0}\n

    agg = agg.learn_one(next(X))\nagg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 12.5}\n

    "},{"location":"api/stats/Shift/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Skew/","title":"Skew","text":"

    Running skew using Welford's algorithm.

    "},{"location":"api/stats/Skew/#parameters","title":"Parameters","text":"
    • bias

      Default \u2192 False

      If False, then the calculations are corrected for statistical bias.

    "},{"location":"api/stats/Skew/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Skew/#examples","title":"Examples","text":"

    from river import stats\nimport numpy as np\n\nnp.random.seed(42)\nX = np.random.normal(loc=0, scale=1, size=10)\n\nskew = stats.Skew(bias=False)\nfor x in X:\n    print(skew.update(x).get())\n
    0.0\n0.0\n-1.4802398132849872\n0.5127437186677888\n0.7803466510704751\n1.056115628922055\n0.5057840774320389\n0.3478402420400934\n0.4536710660918704\n0.4123070197493227\n

    skew = stats.Skew(bias=True)\nfor x in X:\n    print(skew.update(x).get())\n
    0.0\n0.0\n-0.6043053732501439\n0.2960327239981376\n0.5234724473423674\n0.7712778043924866\n0.39022088752624845\n0.278892645224261\n0.37425953513864063\n0.3476878073823696\n

    "},{"location":"api/stats/Skew/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Sum/","title":"Sum","text":"

    Running sum.

    "},{"location":"api/stats/Sum/#attributes","title":"Attributes","text":"
    • sum (float)

      The running sum.

    "},{"location":"api/stats/Sum/#examples","title":"Examples","text":"

    from river import stats\n\nX = [-5, -3, -1, 1, 3, 5]\nrunning_sum = stats.Sum()\nfor x in X:\n    print(running_sum.update(x).get())\n
    -5.0\n-8.0\n-9.0\n-8.0\n-5.0\n0.0\n

    from river import utils\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_sum = utils.Rolling(stats.Sum(), window_size=2)\nfor x in X:\n    print(rolling_sum.update(x).get())\n
    1.0\n-3.0\n-1.0\n1.0\n0.0\n3.0\n

    "},{"location":"api/stats/Sum/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Var/","title":"Var","text":"

    Running variance using Welford's algorithm.

    "},{"location":"api/stats/Var/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom. The divisor used in calculations is n - ddof, where n represents the number of seen elements.

    "},{"location":"api/stats/Var/#attributes","title":"Attributes","text":"
    • mean

      It is necessary to calculate the mean of the data in order to calculate its variance.

    "},{"location":"api/stats/Var/#examples","title":"Examples","text":"

    from river import stats\n\nX = [3, 5, 4, 7, 10, 12]\n\nvar = stats.Var()\nfor x in X:\n    print(var.update(x).get())\n
    0.0\n2.0\n1.0\n2.916666\n7.7\n12.56666\n

    You can measure a rolling variance by using a utils.Rolling wrapper:

    from river import utils\n\nX = [1, 4, 2, -4, -8, 0]\nrvar = utils.Rolling(stats.Var(ddof=1), window_size=3)\nfor x in X:\n    print(rvar.update(x).get())\n
    0.0\n4.5\n2.333333\n17.333333\n25.333333\n16.0\n

    "},{"location":"api/stats/Var/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many"},{"location":"api/stats/Var/#notes","title":"Notes","text":"

    The outcomes of the incremental and parallel updates are consistent with numpy's batch processing when \\(\\text{ddof} \\le 1\\).
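
    A quick sketch of this consistency check against numpy (assuming numpy is installed):

    import math\nimport numpy as np\nfrom river import stats\n\nX = [3, 5, 4, 7, 10, 12]\nvar = stats.Var(ddof=1)\nfor x in X:\n    var = var.update(x)\n\n# Welford's incremental estimate agrees with numpy's batch computation.\nassert math.isclose(var.get(), np.var(X, ddof=1))\n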

    1. Wikipedia article on algorithms for calculating variance \u21a9

    2. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1983. Algorithms for computing the sample variance: Analysis and recommendations. The American Statistician, 37(3), pp.242-247. \u21a9

    3. Schubert, E. and Gertz, M., 2018, July. Numerically stable parallel computation of (co-)variance. In Proceedings of the 30th International Conference on Scientific and Statistical Database Management (pp. 1-12).\u00a0\u21a9

    "},{"location":"api/stats/base/Bivariate/","title":"Bivariate","text":"

    A bivariate statistic measures a relationship between two variables.
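
    As a minimal sketch, stats.Cov is one concrete bivariate statistic; its update method takes a pair (x, y):

    from river import stats\n\ncov = stats.Cov()\nfor x, y in zip([1, 2, 3], [2, 4, 6]):\n    cov = cov.update(x, y)\ncov.get()\n
    2.0\n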

    "},{"location":"api/stats/base/Bivariate/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x
    • y

    "},{"location":"api/stats/base/Univariate/","title":"Univariate","text":"

    A univariate statistic measures a property of a variable.

    "},{"location":"api/stats/base/Univariate/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/base/Univariate/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stream/Cache/","title":"Cache","text":"

    Utility for caching iterables.

    This can be used to save a stream of data to the disk in order to iterate over it faster the following time. This can save time depending on the nature of the stream. The more processing happens in a stream, the more time will be saved. Even in the case where no processing is done apart from reading the data, the cache will save some time because it uses the pickle binary protocol. It can thus improve the speed in common cases such as reading from a CSV file.

    "},{"location":"api/stream/Cache/#parameters","title":"Parameters","text":"
    • directory

      Default \u2192 None

    The path where to store the pickled data streams. If not provided, then it will be automatically inferred whenever possible; otherwise an exception will be raised.

    "},{"location":"api/stream/Cache/#attributes","title":"Attributes","text":"
    • keys (set)

      The set of keys that are being cached.

    "},{"location":"api/stream/Cache/#examples","title":"Examples","text":"
    import time\nfrom river import datasets\nfrom river import stream\n\ndataset = datasets.Phishing()\ncache = stream.Cache()\n

    The cache can be used by wrapping it around an iterable. Because this is the first time we are iterating over the data, nothing is cached.

    tic = time.time()\nfor x, y in cache(dataset, key='phishing'):\n    pass\ntoc = time.time()\nprint(toc - tic)  # doctest: +SKIP\n
    0.012813\n

    If we do the same thing again, we can see the loop is now faster.

    tic = time.time()\nfor x, y in cache(dataset, key='phishing'):\n    pass\ntoc = time.time()\nprint(toc - tic)  # doctest: +SKIP\n
    0.001927\n

    We can see an overview of the cache. The first line indicates the location of the cache.

    cache  # doctest: +SKIP\n
    /tmp\nphishing - 125.2KiB\n

    Finally, we can clear the stream from the cache.

    cache.clear('phishing')\ncache  # doctest: +SKIP\n
    /tmp\n

    There is also a clear_all method to remove all the items in the cache.

    cache.clear_all()\n
    "},{"location":"api/stream/Cache/#methods","title":"Methods","text":"call

    Call self as a function.

    Parameters

    • stream
    • key \u2014 defaults to None

    clear

    Delete the cached stream associated with the given key.

    Parameters

    • key \u2014 'str'

    clear_all

    Delete all the cached streams.

    "},{"location":"api/stream/TwitchChatStream/","title":"TwitchChatStream","text":"

    Twitch chat stream client.

    This client gives access to a live stream of chat messages in Twitch channels using the IRC protocol. You need to have a Twitch account and receive an OAuth token from https://twitchapps.com/tmi/.

    "},{"location":"api/stream/TwitchChatStream/#parameters","title":"Parameters","text":"
    • nickname

      Type \u2192 str

      The nickname of your account.

    • token

      Type \u2192 str

      OAuth token which has been generated.

    • channels

      Type \u2192 list[str]

      A list of channel names like [\"asmongold\", \"shroud\"] you want to collect messages from.

    • buffer_size

      Type \u2192 int

      Default \u2192 2048

    Size of the buffer in bytes used for receiving responses from Twitch over IRC (default 2 kB).

    • timeout

      Type \u2192 int

      Default \u2192 60

    A timeout value in seconds for waiting for a response from Twitch (default 60s). This can be useful if all requested channels are offline or the chat is not active enough.

    "},{"location":"api/stream/TwitchChatStream/#examples","title":"Examples","text":"

    The live stream is instantiated by passing your Twitch account nickname, OAuth token and list of channels. Other parameters are optional.

    from river import stream\n\ntwitch_chat = stream.TwitchChatStream(\n    nickname=\"twitch_user1\",\n    token=\"oauth:okrip6j6fjio8n5xpy2oum1lph4fbve\",\n    channels=[\"asmongold\", \"shroud\"]\n)\n

    The stream can be iterated over like this:

    for item in twitch_chat:\n    print(item)\n

    Here's a single stream item example:

    {\n    'dt': datetime.datetime(2022, 9, 14, 10, 33, 37, 989560),\n    'channel': 'asmongold',\n    'username': 'moojiejaa',\n    'msg': 'damn this chat mod are wild'\n}\n

    1. Twitch IRC doc \u21a9

    "},{"location":"api/stream/TwitterLiveStream/","title":"TwitterLiveStream","text":"

    Twitter API v2 live stream client.

    This client gives access to a live stream of Tweets. That is, Tweets that have just been published. This is different from stream.TwitterRecentStream, which also covers Tweets that have been published over recent days, and not necessarily in real-time.

    A list of filtering rules has to be provided. For instance, this allows focusing on a subset of topics and/or users.

    Note

    Using this requires having the requests package installed.

    "},{"location":"api/stream/TwitterLiveStream/#parameters","title":"Parameters","text":"
    • rules

    See the documentation [2] for a comprehensive overview of filtering rules.

    • bearer_token

      A bearer token that is available in each account's developer portal.

    "},{"location":"api/stream/TwitterLiveStream/#examples","title":"Examples","text":"

    The live stream is instantiated by passing a list of filtering rules, as well as a bearer token. For instance, we can listen to all the breaking news Tweets from the BBC and CNN.

    from river import stream\n\ntweets = stream.TwitterLiveStream(\n    rules=[\"from:BBCBreaking\", \"from:cnnbrk\"],\n    bearer_token=\"<insert_bearer_token>\"\n)\n
    The stream can then be iterated over, possibly in an infinite loop. This will listen to the live feed of Tweets and produce a Tweet right after it's been published.

    import logging\nimport time\n\nimport requests\n\nwhile True:\n    try:\n        for tweet in tweets:\n            print(tweet)\n    except requests.exceptions.RequestException as e:\n        logging.warning(str(e))\n        time.sleep(10)\n

    Here's a Tweet example:

    {\n    'data': {\n        'author_id': '428333',\n        'created_at': '2022-08-26T12:59:48.000Z',\n        'id': '1563149212774445058',\n        'text': "Ukraine's Zaporizhzhia nuclear power plant, which is currently held by Russian forces, has been reconnected to Ukraine's electricity grid, according to the country's nuclear operator https://t.co/xfylkBs4JR"\n    },\n    'includes': {\n        'users': [\n            {\n                'created_at': '2007-01-02T01:48:14.000Z',\n                'id': '428333',\n                'name': 'CNN Breaking News',\n                'username': 'cnnbrk'\n            }\n        ]\n    },\n    'matching_rules': [{'id': '1563148866333151233', 'tag': 'from:cnnbrk'}]\n}\n

    1. Filtered stream introduction \u21a9

    2. Building rules for filtered stream \u21a9

    3. Stream Tweets in real-time \u21a9

    "},{"location":"api/stream/iter-arff/","title":"iter_arff","text":"

    Iterates over rows from an ARFF file.

    "},{"location":"api/stream/iter-arff/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target

      Type \u2192 str | list[str] | None

      Default \u2192 None

      Name(s) of the target field. If None, then the target field is ignored. If a list of names is passed, then a dictionary is returned instead of a single value.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    • sparse

      Default \u2192 False

      Whether the data is sparse or not.

    "},{"location":"api/stream/iter-arff/#examples","title":"Examples","text":"

    cars = '''\n@relation CarData\n@attribute make {Toyota, Honda, Ford, Chevrolet}\n@attribute model string\n@attribute year numeric\n@attribute price numeric\n@attribute mpg numeric\n@data\nToyota, Corolla, 2018, 15000, 30.5\nHonda, Civic, 2019, 16000, 32.2\nFord, Mustang, 2020, 25000, 25.0\nChevrolet, Malibu, 2017, 18000, 28.9\nToyota, Camry, 2019, 22000, 29.8\n'''\nwith open('cars.arff', mode='w') as f:\n    _ = f.write(cars)\n\nfrom river import stream\n\nfor x, y in stream.iter_arff('cars.arff', target='price'):\n    print(x, y)\n
    {'make': 'Toyota', 'model': ' Corolla', 'year': 2018.0, 'mpg': 30.5} 15000.0\n{'make': 'Honda', 'model': ' Civic', 'year': 2019.0, 'mpg': 32.2} 16000.0\n{'make': 'Ford', 'model': ' Mustang', 'year': 2020.0, 'mpg': 25.0} 25000.0\n{'make': 'Chevrolet', 'model': ' Malibu', 'year': 2017.0, 'mpg': 28.9} 18000.0\n{'make': 'Toyota', 'model': ' Camry', 'year': 2019.0, 'mpg': 29.8} 22000.0\n

    Finally, let's delete the example file.

    import os; os.remove('cars.arff')\n

    ARFF files support sparse data. Let's create a sparse ARFF file.

    sparse = '''\n% traindata\n@RELATION \"traindata: -C 6\"\n@ATTRIBUTE y0 {0, 1}\n@ATTRIBUTE y1 {0, 1}\n@ATTRIBUTE y2 {0, 1}\n@ATTRIBUTE y3 {0, 1}\n@ATTRIBUTE y4 {0, 1}\n@ATTRIBUTE y5 {0, 1}\n@ATTRIBUTE X0 NUMERIC\n@ATTRIBUTE X1 NUMERIC\n@ATTRIBUTE X2 NUMERIC\n@DATA\n{ 3 1,6 0.863382,8 0.820094 }\n{ 2 1,6 0.659761 }\n{ 0 1,3 1,6 0.437881,8 0.818882 }\n{ 2 1,6 0.676477,7 0.724635,8 0.755123 }\n'''\n\nwith open('sparse.arff', mode='w') as f:\n    _ = f.write(sparse)\n

    In addition, we'll specify that there are several target fields.

    arff_stream = stream.iter_arff(\n    'sparse.arff',\n    target=['y0', 'y1', 'y2', 'y3', 'y4', 'y5'],\n    sparse=True\n)\n\nfor x, y in arff_stream:\n    print(x)\n    print(y)\n
    {'X0': '0.863382', 'X2': '0.820094'}\n{'y0': 0, 'y1': 0, 'y2': 0, 'y3': '1', 'y4': 0, 'y5': 0}\n{'X0': '0.659761'}\n{'y0': 0, 'y1': 0, 'y2': '1', 'y3': 0, 'y4': 0, 'y5': 0}\n{'X0': '0.437881', 'X2': '0.818882'}\n{'y0': '1', 'y1': 0, 'y2': 0, 'y3': '1', 'y4': 0, 'y5': 0}\n{'X0': '0.676477', 'X1': '0.724635', 'X2': '0.755123'}\n{'y0': 0, 'y1': 0, 'y2': '1', 'y3': 0, 'y4': 0, 'y5': 0}\n

    This function can also deal with missing features in non-sparse data. These are indicated with a question mark.

    data = '''\n@relation giveMeLoan-weka.filters.unsupervised.attribute.Remove-R1\n@attribute RevolvingUtilizationOfUnsecuredLines numeric\n@attribute age numeric\n@attribute NumberOfTime30-59DaysPastDueNotWorse numeric\n@attribute DebtRatio numeric\n@attribute MonthlyIncome numeric\n@attribute NumberOfOpenCreditLinesAndLoans numeric\n@attribute NumberOfTimes90DaysLate numeric\n@attribute NumberRealEstateLoansOrLines numeric\n@attribute NumberOfTime60-89DaysPastDueNotWorse numeric\n@attribute NumberOfDependents numeric\n@attribute isFraud {0,1}\n@data\n0.213179,74,0,0.375607,3500,3,0,1,0,1,0\n0.305682,57,0,5710,?,8,0,3,0,0,0\n0.754464,39,0,0.20994,3500,8,0,0,0,0,0\n0.116951,27,0,46,?,2,0,0,0,0,0\n0.189169,57,0,0.606291,23684,9,0,4,0,2,0\n'''\n\nwith open('data.arff', mode='w') as f:\n    _ = f.write(data)\n\nfor x, y in stream.iter_arff('data.arff', target='isFraud'):\n    print(len(x))\n
    10\n9\n10\n9\n10\n

    1. ARFF format description from Weka \u21a9

    "},{"location":"api/stream/iter-array/","title":"iter_array","text":"

    Iterates over the rows from an array of features and an array of targets.

    This method is intended to work with numpy arrays, but should also work with Python lists.

    "},{"location":"api/stream/iter-array/#parameters","title":"Parameters","text":"
    • X

      Type \u2192 np.ndarray

      A 2D array of features. This can also be a 1D array of strings, which can be the case if you're working with text.

    • y

      Type \u2192 np.ndarray | None

      Default \u2192 None

      An optional array of targets.

    • feature_names

      Type \u2192 list[base.typing.FeatureName] | None

      Default \u2192 None

      An optional list of feature names. The features will be labeled with integers if no names are provided.

    • target_names

      Type \u2192 list[base.typing.FeatureName] | None

      Default \u2192 None

      An optional list of output names. The outputs will be labeled with integers if no names are provided. Only applies if there are multiple outputs, i.e. if y is a 2D array.

    • shuffle

      Type \u2192 bool

      Default \u2192 False

      Indicates whether or not to shuffle the input arrays before iterating over them.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for shuffling the data.

    "},{"location":"api/stream/iter-array/#examples","title":"Examples","text":"

    from river import stream\nimport numpy as np\n\nX = np.array([[1, 2, 3], [11, 12, 13]])\nY = np.array([True, False])\n\ndataset = stream.iter_array(\n    X, Y,\n    feature_names=['x1', 'x2', 'x3']\n)\nfor x, y in dataset:\n    print(x, y)\n
    {'x1': 1, 'x2': 2, 'x3': 3} True\n{'x1': 11, 'x2': 12, 'x3': 13} False\n

    This also works with an array of texts:

    X = [\"foo\", \"bar\"]\ndataset = stream.iter_array(\n    X, Y,\n    feature_names=['x1', 'x2', 'x3']\n)\nfor x, y in dataset:\n    print(x, y)\n
    foo True\nbar False\n

    "},{"location":"api/stream/iter-csv/","title":"iter_csv","text":"

    Iterates over rows from a CSV file.

    Reading CSV files can be quite slow. If, for whatever reason, you're going to loop through the same file multiple times, then we recommend that you use the stream.Cache utility.

    "},{"location":"api/stream/iter-csv/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target

      Type \u2192 str | list[str] | None

      Default \u2192 None

      A single target column is assumed if a string is passed. A multiple output scenario is assumed if a list of strings is passed. A None value will be assigned to each y if this parameter is omitted.

    • converters

      Type \u2192 dict | None

      Default \u2192 None

      All values in the CSV are interpreted as strings by default. You can use this parameter to cast values to the desired type. This should be a dict mapping feature names to callables used to parse their associated values. Note that a callable may be a type, such as float and int.

    • parse_dates

      Type \u2192 dict | None

      Default \u2192 None

      A dict mapping feature names to a format passed to the datetime.datetime.strptime method.

    • drop

      Type \u2192 list[str] | None

      Default \u2192 None

      Fields to ignore.

    • drop_nones

      Default \u2192 False

      Whether or not to drop fields where the value is a None.

    • fraction

      Default \u2192 1.0

      Sampling fraction.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      If specified, the sampling will be deterministic.

    • field_size_limit

      Type \u2192 int | None

      Default \u2192 None

      If not None, this will be passed to the csv.field_size_limit function.

    • kwargs

      All other keyword arguments are passed to the underlying csv.DictReader.

    "},{"location":"api/stream/iter-csv/#examples","title":"Examples","text":"

    Although this function is designed to handle different kinds of inputs, the most common use case is to read a file on the disk. We'll first create a little CSV file to illustrate.

    tv_shows = '''name,year,rating\nPlanet Earth II,2016,9.5\nPlanet Earth,2006,9.4\nBand of Brothers,2001,9.4\nBreaking Bad,2008,9.4\nChernobyl,2019,9.4\n'''\nwith open('tv_shows.csv', mode='w') as f:\n    _ = f.write(tv_shows)\n

    We can now go through the rows one by one. We can use the converters parameter to cast the rating field value as a float. We can also convert the year to a datetime via the parse_dates parameter.

    from river import stream\n\nparams = {\n    'converters': {'rating': float},\n    'parse_dates': {'year': '%Y'}\n}\nfor x, y in stream.iter_csv('tv_shows.csv', **params):\n    print(x, y)\n
    {'name': 'Planet Earth II', 'year': datetime.datetime(2016, 1, 1, 0, 0), 'rating': 9.5} None\n{'name': 'Planet Earth', 'year': datetime.datetime(2006, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Band of Brothers', 'year': datetime.datetime(2001, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Breaking Bad', 'year': datetime.datetime(2008, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Chernobyl', 'year': datetime.datetime(2019, 1, 1, 0, 0), 'rating': 9.4} None\n

    The value of y is always None because we haven't provided a value for the target parameter. Here is an example where a target is provided:

    dataset = stream.iter_csv('tv_shows.csv', target='rating', **params)\nfor x, y in dataset:\n    print(x, y)\n
    {'name': 'Planet Earth II', 'year': datetime.datetime(2016, 1, 1, 0, 0)} 9.5\n{'name': 'Planet Earth', 'year': datetime.datetime(2006, 1, 1, 0, 0)} 9.4\n{'name': 'Band of Brothers', 'year': datetime.datetime(2001, 1, 1, 0, 0)} 9.4\n{'name': 'Breaking Bad', 'year': datetime.datetime(2008, 1, 1, 0, 0)} 9.4\n{'name': 'Chernobyl', 'year': datetime.datetime(2019, 1, 1, 0, 0)} 9.4\n

    Finally, let's delete the example file.

    import os; os.remove('tv_shows.csv')\n
    "},{"location":"api/stream/iter-libsvm/","title":"iter_libsvm","text":"

    Iterates over a dataset in LIBSVM format.

    The LIBSVM format is a popular way in the machine learning community to store sparse datasets. Only numerical feature values are supported. The feature names will be considered as strings.

    "},{"location":"api/stream/iter-libsvm/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Type \u2192 str

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target_type

      Default \u2192 <class 'float'>

      The type of the target value.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    "},{"location":"api/stream/iter-libsvm/#examples","title":"Examples","text":"

    import io\nfrom river import stream\n\ndata = io.StringIO('''+1 x:-134.26 y:0.2563\n1 x:-12 z:0.3\n-1 y:.25\n''')\n\nfor x, y in stream.iter_libsvm(data, target_type=int):\n    print(y, x)\n
    1 {'x': -134.26, 'y': 0.2563}\n1 {'x': -12.0, 'z': 0.3}\n-1 {'y': 0.25}\n

    1. LIBSVM documentation \u21a9

    "},{"location":"api/stream/iter-pandas/","title":"iter_pandas","text":"

    Iterates over the rows of a pandas.DataFrame.

    "},{"location":"api/stream/iter-pandas/#parameters","title":"Parameters","text":"
    • X

      Type \u2192 pd.DataFrame

      A dataframe of features.

    • y

      Type \u2192 pd.Series | pd.DataFrame | None

      Default \u2192 None

      A series or a dataframe with one column per target.

    • kwargs

      Extra keyword arguments are passed to the underlying call to stream.iter_array.

    "},{"location":"api/stream/iter-pandas/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import stream\n\nX = pd.DataFrame({\n    'x1': [1, 2, 3, 4],\n    'x2': ['blue', 'yellow', 'yellow', 'blue'],\n    'y': [True, False, False, True]\n})\ny = X.pop('y')\n\nfor xi, yi in stream.iter_pandas(X, y):\n    print(xi, yi)\n
    {'x1': 1, 'x2': 'blue'} True\n{'x1': 2, 'x2': 'yellow'} False\n{'x1': 3, 'x2': 'yellow'} False\n{'x1': 4, 'x2': 'blue'} True\n

    "},{"location":"api/stream/iter-sklearn-dataset/","title":"iter_sklearn_dataset","text":"

    Iterates rows from one of the datasets provided by scikit-learn.

    This allows you to use any dataset from scikit-learn's datasets module. For instance, you can use the fetch_openml function to get access to all of the datasets from the OpenML website.

    "},{"location":"api/stream/iter-sklearn-dataset/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 sklearn.utils.Bunch

      A scikit-learn dataset.

    • kwargs

      Extra keyword arguments are passed to the underlying call to stream.iter_array.

    "},{"location":"api/stream/iter-sklearn-dataset/#examples","title":"Examples","text":"

    import pprint\nfrom sklearn import datasets\nfrom river import stream\n\ndataset = datasets.load_diabetes()\n\nfor xi, yi in stream.iter_sklearn_dataset(dataset):\n    pprint.pprint(xi)\n    print(yi)\n    break\n
    {'age': 0.038075906433423026,\n 'bmi': 0.061696206518683294,\n 'bp': 0.0218723855140367,\n 's1': -0.04422349842444599,\n 's2': -0.03482076283769895,\n 's3': -0.04340084565202491,\n 's4': -0.002592261998183278,\n 's5': 0.019907486170462722,\n 's6': -0.01764612515980379,\n 'sex': 0.05068011873981862}\n151.0\n

    "},{"location":"api/stream/iter-sql/","title":"iter_sql","text":"

    Iterates over the results from an SQL query.

    By default, SQLAlchemy prefetches results. Therefore, even though you can iterate over the resulting rows one by one, the results are in fact loaded in batch. You can modify this behavior by configuring the connection you pass to iter_sql. For instance, you can set the stream_results parameter to True, as explained in SQLAlchemy's documentation. Note, however, that this isn't available for all database engines.
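
    Here is a sketch of what that looks like, assuming an engine and a query such as the ones in the example below; Connection.execution_options and the stream_results flag are standard SQLAlchemy, but whether they take effect depends on the database driver:

    with engine.connect() as conn:\n    conn = conn.execution_options(stream_results=True)\n    for x, y in stream.iter_sql(query, conn, target_name='amount'):\n        ...\n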

    "},{"location":"api/stream/iter-sql/#parameters","title":"Parameters","text":"
    • query

      Type \u2192 str | sqlalchemy.TextClause | sqlalchemy.Select

      SQL query to be executed.

    • conn

      Type \u2192 sqlalchemy.Connection

      An SQLAlchemy construct which has an execute method. In other words you can pass an engine, a connection, or a session.

    • target_name

      Type \u2192 str | None

      Default \u2192 None

      The name of the target field. If this is None, then y will also be None.

    "},{"location":"api/stream/iter-sql/#examples","title":"Examples","text":"

    As an example we'll create an in-memory database with SQLAlchemy.

    import datetime as dt\nimport sqlalchemy\n\nengine = sqlalchemy.create_engine('sqlite://')\n\nmetadata = sqlalchemy.MetaData()\n\nt_sales = sqlalchemy.Table('sales', metadata,\n    sqlalchemy.Column('shop', sqlalchemy.String, primary_key=True),\n    sqlalchemy.Column('date', sqlalchemy.Date, primary_key=True),\n    sqlalchemy.Column('amount', sqlalchemy.Integer)\n)\n\nmetadata.create_all(engine)\n\nsales = [\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 2), 'amount': 20},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 2), 'amount': 18},\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 3), 'amount': 22},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 3), 'amount': 14},\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 4), 'amount': 12},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 4), 'amount': 16}\n]\n\nwith engine.connect() as conn:\n    _ = conn.execute(t_sales.insert(), sales)\n    conn.commit()\n

    We can now query the database. We will set amount to be the target field.

    from river import stream\n\nwith engine.connect() as conn:\n    query = sqlalchemy.sql.select(t_sales)\n    dataset = stream.iter_sql(query, conn, target_name='amount')\n    for x, y in dataset:\n        print(x, y)\n
    {'shop': 'Hema', 'date': datetime.date(2016, 8, 2)} 20\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 2)} 18\n{'shop': 'Hema', 'date': datetime.date(2016, 8, 3)} 22\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 3)} 14\n{'shop': 'Hema', 'date': datetime.date(2016, 8, 4)} 12\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 4)} 16\n

    This also works with raw SQL queries.

    with engine.connect() as conn:\n    query = \"SELECT * FROM sales WHERE shop = 'Hema'\"\n    dataset = stream.iter_sql(query, conn, target_name='amount')\n    for x, y in dataset:\n        print(x, y)\n
    {'shop': 'Hema', 'date': '2016-08-02'} 20\n{'shop': 'Hema', 'date': '2016-08-03'} 22\n{'shop': 'Hema', 'date': '2016-08-04'} 12\n

    "},{"location":"api/stream/shuffle/","title":"shuffle","text":"

    Shuffles a stream of data.

    This works by maintaining a buffer of elements. The first buffer_size elements are stored in memory. Once the buffer is full, a random element inside the buffer is yielded. Every time an element is yielded, the next element in the stream replaces it and the buffer is sampled again. Increasing buffer_size will improve the quality of the shuffling.

    If you really want to stream over your dataset in a \"good\" random order, the best way is to split your dataset into smaller datasets and loop over them in a round-robin fashion. You may do this by using the roundrobin recipe from the itertools module.
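
    For reference, here is a sketch of that roundrobin recipe, adapted from the itertools documentation; pre-split datasets can then be interleaved with roundrobin(*datasets):

    import itertools\n\ndef roundrobin(*iterables):\n    # roundrobin('ABC', 'D', 'EF') --> A D E B F C\n    num_active = len(iterables)\n    nexts = itertools.cycle(iter(it).__next__ for it in iterables)\n    while num_active:\n        try:\n            for next_ in nexts:\n                yield next_()\n        except StopIteration:\n            # Remove the exhausted iterator and cycle through the rest.\n            num_active -= 1\n            nexts = itertools.cycle(itertools.islice(nexts, num_active))\n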

    "},{"location":"api/stream/shuffle/#parameters","title":"Parameters","text":"
    • stream

      Type \u2192 typing.Iterator

      The stream to shuffle.

    • buffer_size

      Type \u2192 int

    The size of the buffer which contains the elements held in memory. Increasing this will increase randomness but will incur more memory usage.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for sampling.

    "},{"location":"api/stream/shuffle/#examples","title":"Examples","text":"

    from river import stream\n\nfor i in stream.shuffle(range(15), buffer_size=5, seed=42):\n    print(i)\n
    0\n5\n2\n1\n8\n9\n6\n4\n11\n12\n10\n7\n14\n13\n3\n

    1. Visualizing TensorFlow's streaming shufflers \u21a9

    "},{"location":"api/stream/simulate-qa/","title":"simulate_qa","text":"

    Simulate a time-ordered question and answer session.

    This method allows looping through a dataset in the order in which it arrived. Indeed, it usually is the case that labels arrive after features. Being able to go through a dataset in arrival order enables assessing a model's performance in a reliable manner. For instance, the evaluate.progressive_val_score is a high-level method that can be used to score a model on a dataset. Under the hood it uses this method to determine the correct arrival order.

    "},{"location":"api/stream/simulate-qa/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A stream of (features, target) tuples.

    • moment

      Type \u2192 str | typing.Callable[[dict], dt.datetime] | None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive. If a str is passed, then it will be used to obtain the time from the input features.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

    The amount of time to wait before revealing the target associated with each observation to the model. This value is expected to be summable with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation. If a scalar is passed, such as an int or a datetime.timedelta, then the delay is constant.

    • copy

      Type \u2192 bool

      Default \u2192 True

    If True, then a separate copy of the features is yielded the second time around. This ensures that inadvertent modifications in downstream code don't have any effect.

    "},{"location":"api/stream/simulate-qa/#examples","title":"Examples","text":"

    The arrival delay isn't usually indicated in a dataset, but it can sometimes be inferred from the features. As an example, we'll simulate the departure and arrival times of taxi trips. Let's first create a time table which records the departure time and the duration in seconds of several taxi trips.

    import datetime as dt\ntime_table = [\n    (dt.datetime(2020, 1, 1, 20,  0, 0),  900),\n    (dt.datetime(2020, 1, 1, 20, 10, 0), 1800),\n    (dt.datetime(2020, 1, 1, 20, 20, 0),  300),\n    (dt.datetime(2020, 1, 1, 20, 45, 0),  400),\n    (dt.datetime(2020, 1, 1, 20, 50, 0),  240),\n    (dt.datetime(2020, 1, 1, 20, 55, 0),  450)\n]\n

    We can now create a streaming dataset where the features are the departure dates and the targets are the durations.

    dataset = (\n    ({'date': date}, duration)\n    for date, duration in time_table\n)\n

    Now, we can use simulate_qa to iterate over the events in the order in which they are meant to occur.

    delay = lambda _, y: dt.timedelta(seconds=y)\n\nfor i, x, y in simulate_qa(dataset, moment='date', delay=delay):\n    if y is None:\n        print(f'{x[\"date\"]} - trip #{i} departs')\n    else:\n        arrival_date = x['date'] + dt.timedelta(seconds=y)\n        print(f'{arrival_date} - trip #{i} arrives after {y} seconds')\n
    2020-01-01 20:00:00 - trip #0 departs\n2020-01-01 20:10:00 - trip #1 departs\n2020-01-01 20:15:00 - trip #0 arrives after 900 seconds\n2020-01-01 20:20:00 - trip #2 departs\n2020-01-01 20:25:00 - trip #2 arrives after 300 seconds\n2020-01-01 20:40:00 - trip #1 arrives after 1800 seconds\n2020-01-01 20:45:00 - trip #3 departs\n2020-01-01 20:50:00 - trip #4 departs\n2020-01-01 20:51:40 - trip #3 arrives after 400 seconds\n2020-01-01 20:54:00 - trip #4 arrives after 240 seconds\n2020-01-01 20:55:00 - trip #5 departs\n2020-01-01 21:02:30 - trip #5 arrives after 450 seconds\n

    This function is extremely practical because it provides a reliable way to evaluate the performance of a model in a real scenario. Indeed, it allows making predictions and performing model updates in exactly the same manner as would happen live. For instance, it is used in evaluate.progressive_val_score, a higher-level function for evaluating models in an online manner.

    "},{"location":"api/time-series/ForecastingMetric/","title":"ForecastingMetric","text":""},{"location":"api/time-series/ForecastingMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/HoltWinters/","title":"HoltWinters","text":"

    Holt-Winters forecaster.

    This is a standard implementation of the Holt-Winters forecasting method. Certain parametrisations result in special cases, such as simple exponential smoothing.

    Optimal parameters and initialisation values can be determined in a batch setting. However, in an online setting, it is necessary to wait and observe enough values. The first k = max(2, seasonality) values are indeed used to initialize the components.

    Level initialization

    \\[l = \\frac{1}{k} \\sum_{i=1}{k} y_i\\]

    Trend initialization

    \\[t = \\frac{1}{k - 1} \\sum_{i=2}{k} y_i - y_{i-1}\\]

    Seasonality initialization

    \\[s_i = \\frac{y_i}{k}\\]"},{"location":"api/time-series/HoltWinters/#parameters","title":"Parameters","text":"
    • alpha

      Smoothing parameter for the level.

    • beta

      Default \u2192 None

      Smoothing parameter for the trend.

    • gamma

      Default \u2192 None

      Smoothing parameter for the seasonality.

    • seasonality

      Default \u2192 0

    The number of periods in a season. For instance, this should be 4 for quarterly data, and 12 for monthly data.

    • multiplicative

      Default \u2192 False

      Whether or not to use a multiplicative formulation.

    "},{"location":"api/time-series/HoltWinters/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import metrics\nfrom river import time_series\n\ndataset = datasets.AirlinePassengers()\n\nmodel = time_series.HoltWinters(\n    alpha=0.3,\n    beta=0.1,\n    gamma=0.6,\n    seasonality=12,\n    multiplicative=True\n)\n\nmetric = metrics.MAE()\n\ntime_series.evaluate(\n    dataset,\n    model,\n    metric,\n    horizon=12\n)\n
    +1  MAE: 25.899087\n+2  MAE: 26.26131\n+3  MAE: 25.735903\n+4  MAE: 25.625678\n+5  MAE: 26.093842\n+6  MAE: 26.90249\n+7  MAE: 28.634398\n+8  MAE: 29.284769\n+9  MAE: 31.018351\n+10 MAE: 32.252349\n+11 MAE: 33.518946\n+12 MAE: 33.975057\n
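
    To make the initialization formulas above concrete, here is a small hand computation over hypothetical first values; it mirrors the formulas as stated, not necessarily River's internal code:

    y = [112.0, 118.0, 132.0, 129.0]  # hypothetical first k observations\nk = len(y)\n\nlevel = sum(y) / k\ntrend = sum(y[i] - y[i - 1] for i in range(1, k)) / (k - 1)\nseason = [yi / k for yi in y]\nprint(level, trend)\n
    122.75 5.666666666666667\n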

    "},{"location":"api/time-series/HoltWinters/#methods","title":"Methods","text":"forecast

    Makes forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Exponential smoothing \u2014 Wikipedia \u21a9

    2. Exponential smoothing \u2014 Forecasting: Principles and Practice \u21a9

    3. What is Exponential Smoothing? \u2014 Engineering statistics handbook \u21a9

    "},{"location":"api/time-series/HorizonAggMetric/","title":"HorizonAggMetric","text":"

    Same as HorizonMetric, but aggregates the result with a provided function.

    This makes it possible, for instance, to measure the average performance of a forecasting model along the horizon.

    "},{"location":"api/time-series/HorizonAggMetric/#parameters","title":"Parameters","text":"
    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float]

    A function that takes as input a list of floats and outputs a single float. You may want to use min, max, as well as statistics.mean or statistics.median.

    "},{"location":"api/time-series/HorizonAggMetric/#examples","title":"Examples","text":"

    This is used internally by the time_series.evaluate function when you pass an agg_func.

    import statistics\nfrom river import datasets\nfrom river import metrics\nfrom river import time_series\n\nmetric = time_series.evaluate(\n    dataset=datasets.AirlinePassengers(),\n    model=time_series.HoltWinters(alpha=0.1),\n    metric=metrics.MAE(),\n    agg_func=statistics.mean,\n    horizon=4\n)\n\nmetric\n
    mean(MAE): 42.901748\n
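
A hedged variation on the example above: aggregating with max reports the worst step-ahead error rather than the average one (output omitted here).

from river import datasets
from river import metrics
from river import time_series

worst_case = time_series.evaluate(
    dataset=datasets.AirlinePassengers(),
    model=time_series.HoltWinters(alpha=0.1),
    metric=metrics.MAE(),
    agg_func=max,
    horizon=4
)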

    "},{"location":"api/time-series/HorizonAggMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/HorizonMetric/","title":"HorizonMetric","text":"

    Measures performance at each time step ahead.

This allows measuring the performance of a model at each time step along the horizon. A copy of the provided regression metric is made for each time step. At each time step ahead, the metric is thus evaluated on the predictions for said time step, and not on those for the time steps before or after it.
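
As a minimal sketch of this mechanism (ours, not river's exact implementation): one deep copy of the base metric is kept per step ahead, and each copy only sees the predictions targeting its step.

import copy
from river import metrics

class HorizonMetricSketch:

    def __init__(self, metric):
        self.metric = metric
        self.metrics = []  # one metric copy per step ahead

    def update(self, y_true, y_pred):
        for t, (yt, yp) in enumerate(zip(y_true, y_pred)):
            if t >= len(self.metrics):
                self.metrics.append(copy.deepcopy(self.metric))
            self.metrics[t].update(yt, yp)
        return self

    def get(self):
        return [m.get() for m in self.metrics]

hm = HorizonMetricSketch(metrics.MAE())
hm.update(y_true=[100, 110], y_pred=[98, 105])
hm.get()  # [2.0, 5.0]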

    "},{"location":"api/time-series/HorizonMetric/#parameters","title":"Parameters","text":"
    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    "},{"location":"api/time-series/HorizonMetric/#examples","title":"Examples","text":"

    This is used internally by the time_series.evaluate function.

    from river import datasets\nfrom river import metrics\nfrom river import time_series\n\nmetric = time_series.evaluate(\n    dataset=datasets.AirlinePassengers(),\n    model=time_series.HoltWinters(alpha=0.1),\n    metric=metrics.MAE(),\n    horizon=4\n)\n\nmetric\n
    +1 MAE: 40.931286\n+2 MAE: 42.667998\n+3 MAE: 44.158092\n+4 MAE: 43.849617\n

    "},{"location":"api/time-series/HorizonMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/SNARIMAX/","title":"SNARIMAX","text":"

    SNARIMAX model.

    SNARIMAX stands for (S)easonal (N)on-linear (A)uto(R)egressive (I)ntegrated (M)oving-(A)verage with e(X)ogenous inputs model.

    This model generalizes many established time series models in a single interface that can be trained online. It assumes that the provided training data is ordered in time and is uniformly spaced. It is made up of the following components:

• S (Seasonal)

• N (Non-linear): Any online regression model can be used, not necessarily a linear regression as is done in textbooks.

• AR (Autoregressive): Lags of the target variable are used as features.

• I (Integrated): The model can be fitted on a differenced version of a time series. In this context, integration is the reverse of differencing.

• MA (Moving average): Lags of the errors are used as features.

• X (Exogenous): Users can provide additional features. Care has to be taken to include features that will be available both at training and prediction time.

    Each of these components can be switched on and off by specifying the appropriate parameters. Classical time series models such as AR, MA, ARMA, and ARIMA can thus be seen as special parametrizations of the SNARIMAX model.
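
For instance, here is a hedged sketch of such parametrizations (variable names are ours; with the default online linear regressor these are online approximations of the classical models, not exact maximum-likelihood fits):

from river import time_series

ar2 = time_series.SNARIMAX(p=2, d=0, q=0)     # AR(2)
ma2 = time_series.SNARIMAX(p=0, d=0, q=2)     # MA(2)
arma = time_series.SNARIMAX(p=2, d=0, q=2)    # ARMA(2, 2)
arima = time_series.SNARIMAX(p=2, d=1, q=2)   # ARIMA(2, 1, 2)
sarima = time_series.SNARIMAX(                # seasonal ARIMA, monthly data
    p=1, d=1, q=1, m=12, sp=1, sd=1, sq=1
)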

This model is tailored for time series that are homoskedastic. In other words, it might not work well if the variance of the time series varies widely over time.

    "},{"location":"api/time-series/SNARIMAX/#parameters","title":"Parameters","text":"
    • p

      Type \u2192 int

      Order of the autoregressive part. This is the number of past target values that will be included as features.

    • d

      Type \u2192 int

      Differencing order.

    • q

      Type \u2192 int

      Order of the moving average part. This is the number of past error terms that will be included as features.

    • m

      Type \u2192 int

      Default \u2192 1

      Season length used for extracting seasonal features. If you believe your data has a seasonal pattern, then set this accordingly. For instance, if the data seems to exhibit a yearly seasonality, and that your data is spaced by month, then you should set this to 12. Note that for this parameter to have any impact you should also set at least one of the p, d, and q parameters.

    • sp

      Type \u2192 int

      Default \u2192 0

      Seasonal order of the autoregressive part. This is the number of past target values that will be included as features.

    • sd

      Type \u2192 int

      Default \u2192 0

      Seasonal differencing order.

    • sq

      Type \u2192 int

      Default \u2192 0

      Seasonal order of the moving average part. This is the number of past error terms that will be included as features.

    • regressor

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The online regression model to use. By default, a preprocessing.StandardScaler piped with a linear_model.LinearRegression will be used.

    "},{"location":"api/time-series/SNARIMAX/#attributes","title":"Attributes","text":"
    • differencer (Differencer)

    • y_trues (collections.deque)

      The p past target values.

    • errors (collections.deque)

      The q past error values.

    "},{"location":"api/time-series/SNARIMAX/#examples","title":"Examples","text":"

    import datetime as dt\nfrom river import datasets\nfrom river import time_series\nfrom river import utils\n\nperiod = 12\nmodel = time_series.SNARIMAX(\n    p=period,\n    d=1,\n    q=period,\n    m=period,\n    sd=1\n)\n\nfor t, (x, y) in enumerate(datasets.AirlinePassengers()):\n    model = model.learn_one(y)\n\nhorizon = 12\nfuture = [\n    {'month': dt.date(year=1961, month=m, day=1)}\n    for m in range(1, horizon + 1)\n]\nforecast = model.forecast(horizon=horizon)\nfor x, y_pred in zip(future, forecast):\n    print(x['month'], f'{y_pred:.3f}')\n
    1961-01-01 494.542\n1961-02-01 450.825\n1961-03-01 484.972\n1961-04-01 576.401\n1961-05-01 559.489\n1961-06-01 612.251\n1961-07-01 722.410\n1961-08-01 674.604\n1961-09-01 575.716\n1961-10-01 562.808\n1961-11-01 477.049\n1961-12-01 515.191\n

Classical ARIMA models learn solely from the time series values. You can also include features built at each step.

    import calendar\nimport math\nfrom river import compose\nfrom river import linear_model\nfrom river import optim\nfrom river import preprocessing\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}\n\nextract_features = compose.TransformerUnion(\n    get_ordinal_date,\n    get_month_distances\n)\n\nmodel = (\n    extract_features |\n    time_series.SNARIMAX(\n        p=1,\n        d=0,\n        q=0,\n        m=12,\n        sp=3,\n        sq=6,\n        regressor=(\n            preprocessing.StandardScaler() |\n            linear_model.LinearRegression(\n                intercept_init=110,\n                optimizer=optim.SGD(0.01),\n                intercept_lr=0.3\n            )\n        )\n    )\n)\n\nfor x, y in datasets.AirlinePassengers():\n    model = model.learn_one(x, y)\n\nforecast = model.forecast(horizon=horizon)\nfor x, y_pred in zip(future, forecast):\n    print(x['month'], f'{y_pred:.3f}')\n
    1961-01-01 444.821\n1961-02-01 432.612\n1961-03-01 457.739\n1961-04-01 465.544\n1961-05-01 476.575\n1961-06-01 516.255\n1961-07-01 565.405\n1961-08-01 572.470\n1961-09-01 512.645\n1961-10-01 475.919\n1961-11-01 438.033\n1961-12-01 456.892\n

    "},{"location":"api/time-series/SNARIMAX/#methods","title":"Methods","text":"forecast

Makes a forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. ARMA - Wikipedia \u21a9

    2. NARX - Wikipedia \u21a9

    3. ARIMA - Forecasting: Principles and Practice \u21a9

    4. Anava, O., Hazan, E., Mannor, S. and Shamir, O., 2013, June. Online learning for time series prediction. In Conference on learning theory (pp. 172-184) \u21a9

    "},{"location":"api/time-series/evaluate/","title":"evaluate","text":"

    Evaluates the performance of a forecaster on a time series dataset.

To understand why this method is useful, it's important to understand the difference between nowcasting and forecasting. Nowcasting is about predicting the value at the next time step, which can be seen as a special case of regression. In that case, the evaluate.progressive_val_score function may be used to evaluate a model via progressive validation.

    Forecasting models can also be evaluated via progressive validation. This is the purpose of this function. At each time step t, the forecaster is asked to predict the values at t + 1, t + 2, ..., t + horizon. The performance at each time step is measured and returned.
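
The following is a simplified sketch of this procedure (ours: univariate, per-step mean absolute error, no grace period), not river's exact implementation:

import collections
import statistics

def progressive_forecast_eval(series, model, horizon):
    abs_errors = [[] for _ in range(horizon)]  # errors grouped by steps ahead
    pending = collections.deque()              # forecasts awaiting ground truth

    for t, y in enumerate(series):
        # Forecasts made at earlier steps may target the current time step.
        for made_at, forecast in pending:
            steps_ahead = t - made_at - 1
            abs_errors[steps_ahead].append(abs(y - forecast[steps_ahead]))
        model.learn_one(y)
        pending.append((t, model.forecast(horizon=horizon)))
        if len(pending) > horizon:
            pending.popleft()  # this forecast's last target has passed

    return [statistics.mean(errs) for errs in abs_errors if errs]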

    "},{"location":"api/time-series/evaluate/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A sequential time series.

    • model

      Type \u2192 time_series.base.Forecaster

      A forecaster.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • horizon

      Type \u2192 int

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float] | None

      Default \u2192 None

    • grace_period

      Type \u2192 int | None

      Default \u2192 None

      Initial period during which the metric is not updated. This is to fairly evaluate models which need a warming up period to start producing meaningful forecasts. The value of this parameter is equal to the horizon by default.

    "},{"location":"api/time-series/iter-evaluate/","title":"iter_evaluate","text":"

    Evaluates the performance of a forecaster on a time series dataset and yields results.

This does exactly the same as time_series.evaluate. The only difference is that this function returns an iterator, yielding results at every step. This can be useful if you want to have control over what you do with the results. For instance, you might want to plot the results.
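
For example (a hedged sketch; the exact shape of each yielded item is best inspected against the installed river version):

from river import datasets
from river import metrics
from river import time_series

steps = time_series.iter_evaluate(
    dataset=datasets.AirlinePassengers(),
    model=time_series.HoltWinters(alpha=0.1),
    metric=metrics.MAE(),
    horizon=4
)

for step in steps:
    print(step)  # inspect one step, e.g. before building a plot
    break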

    "},{"location":"api/time-series/iter-evaluate/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A sequential time series.

    • model

      Type \u2192 time_series.base.Forecaster

      A forecaster.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • horizon

      Type \u2192 int

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float] | None

      Default \u2192 None

    • grace_period

      Type \u2192 int | None

      Default \u2192 None

      Initial period during which the metric is not updated. This is to fairly evaluate models which need a warming up period to start producing meaningful forecasts. The value of this parameter is equal to the horizon by default.

    "},{"location":"api/time-series/base/Forecaster/","title":"Forecaster","text":""},{"location":"api/time-series/base/Forecaster/#methods","title":"Methods","text":"forecast

Makes a forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None
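
As a minimal sketch of this interface (ours, not part of river), here is a naive forecaster that repeats the last observed value over the whole horizon:

from river import time_series

class LastValueForecaster(time_series.base.Forecaster):

    def __init__(self):
        self.last_value = 0.0

    def learn_one(self, y, x=None):
        self.last_value = y
        return self

    def forecast(self, horizon, xs=None):
        return [self.last_value] * horizon

model = LastValueForecaster()
for y in [5.0, 7.0, 6.0]:
    model.learn_one(y)
model.forecast(horizon=3)  # [6.0, 6.0, 6.0]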

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/","title":"ExtremelyFastDecisionTreeClassifier","text":"

    Extremely Fast Decision Tree classifier.

    Also referred to as Hoeffding AnyTime Tree (HATT) classifier.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • min_samples_reevaluate

      Type \u2192 int

      Default \u2192 20

      Number of instances a node should observe before reevaluating the best split.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.Agrawal(classification_function=0, seed=42)\ndataset = iter(gen.take(1000))\n\nmodel = tree.ExtremelyFastDecisionTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    nominal_attributes=['elevel', 'car', 'zipcode'],\n    min_samples_reevaluate=100\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 87.29%\n

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None

  The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Incrementally train the model

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#notes","title":"Notes","text":"

The Extremely Fast Decision Tree (EFDT) 1 constructs a tree incrementally. The EFDT seeks to select and deploy a split as soon as it is confident the split is useful, and then revisits that decision, replacing the split if a better one subsequently becomes available. The EFDT learns rapidly and, provided the distribution generating the data is stationary, eventually learns the asymptotic batch tree.

    1. C. Manapragada, G. Webb, and M. Salehi. Extremely Fast Decision Tree. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD '18). ACM, New York, NY, USA, 1953-1962. DOI: https://doi.org/10.1145/3219819.3220005\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/","title":"HoeffdingAdaptiveTreeClassifier","text":"

    Hoeffding Adaptive Tree classifier.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attributes. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • bootstrap_sampling

      Type \u2192 bool

      Default \u2192 True

      If True, perform bootstrap sampling in the leaf nodes.

    • drift_window_threshold

      Type \u2192 int

      Default \u2192 300

      Minimum number of examples an alternate tree must observe before being considered as a potential replacement to the current one.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detector used to build the tree. If None then drift.ADWIN is used.

    • switch_significance

      Type \u2192 float

      Default \u2192 0.05

      The significance level to assess whether alternate subtrees are significantly better than their main subtree counterparts.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_alternate_trees

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • n_pruned_alternate_trees

    • n_switch_alternate_trees

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.ConceptDriftStream(stream=synth.SEA(seed=42, variant=0),\n                               drift_stream=synth.SEA(seed=42, variant=1),\n                               seed=1, position=500, width=50)\ndataset = iter(gen.take(1000))\n\nmodel = tree.HoeffdingAdaptiveTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    leaf_prediction='nb',\n    nb_threshold=10,\n    seed=0\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 91.49%\n
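
The default ADWIN drift detector can be swapped for any other detector via the drift_detector parameter. A hedged sketch (parameter values are illustrative, not recommendations):

from river import drift
from river import tree

model = tree.HoeffdingAdaptiveTreeClassifier(
    grace_period=100,
    drift_detector=drift.KSWIN(alpha=0.0001),  # instead of the default drift.ADWIN
    seed=0
)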

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None

  The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the model on instance x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#notes","title":"Notes","text":"

    The Hoeffding Adaptive Tree 1 uses a drift detector to monitor performance of branches in the tree and to replace them with new branches when their accuracy decreases.

    The bootstrap sampling strategy is an improvement over the original Hoeffding Adaptive Tree algorithm. It is enabled by default since, in general, it results in better performance.

    1. Bifet, Albert, and Ricard Gavald\u00e0. \"Adaptive learning from evolving data streams.\" In International Symposium on Intelligent Data Analysis, pp. 249-260. Springer, Berlin, Heidelberg, 2009.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/","title":"HoeffdingAdaptiveTreeRegressor","text":"

    Hoeffding Adaptive Tree regressor (HATR).

This class implements a regression version of the Hoeffding Adaptive Tree Classifier. Hence, it also uses an ADWIN concept-drift detector instance at each decision node to monitor possible changes in the data distribution. If a drift is detected in a node, an alternate tree begins to be induced in the background. When enough information is gathered, HATR replaces the node where the change was detected with its alternate tree.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The regression model used to provide responses if leaf_prediction='model'. If not provided an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      The exponential decaying factor applied to the learning models' squared errors, that are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is going to be given to past observations. On the other hand, if its value approaches 0, the recent observed errors are going to have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attributes. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • bootstrap_sampling

      Type \u2192 bool

      Default \u2192 True

      If True, perform bootstrap sampling in the leaf nodes.

    • drift_window_threshold

      Type \u2192 int

      Default \u2192 300

      Minimum number of examples an alternate tree must observe before being considered as a potential replacement to the current one.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detector used to build the tree. If None then drift.ADWIN is used. Only detectors that support arbitrarily valued continuous data can be used for regression.

    • switch_significance

      Type \u2192 float

      Default \u2192 0.05

      The significance level to assess whether alternate subtrees are significantly better than their main subtree counterparts.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_alternate_trees

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • n_pruned_alternate_trees

    • n_switch_alternate_trees

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    tree.HoeffdingAdaptiveTreeRegressor(\n        grace_period=50,\n        model_selector_decay=0.3,\n        seed=0\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.823026\n

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None

  The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the tree model on sample x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#notes","title":"Notes","text":"

    The Hoeffding Adaptive Tree 1 uses drift detectors to monitor performance of branches in the tree and to replace them with new branches when their accuracy decreases.

    The bootstrap sampling strategy is an improvement over the original Hoeffding Adaptive Tree algorithm. It is enabled by default since, in general, it results in better performance.

To cope with ADWIN's requirement of bounded input data, HATR uses a novel error normalization strategy based on the empirical rule of Gaussian distributions. We assume the deviations of the predictions from the expected values follow a normal distribution. Hence, we subject these errors to a min-max normalization, assuming that most of the data lies in the \\(\\left[-3\\sigma, 3\\sigma\\right]\\) range. These normalized errors are passed to the ADWIN instances. This is the same strategy used by the Adaptive Random Forest Regressor.
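
A hedged sketch of this normalization (the exact form used internally may differ):

def normalize_error(error: float, sigma: float) -> float:
    """Min-max map an error to [0, 1], assuming errors mostly lie in [-3*sigma, 3*sigma]."""
    if sigma == 0:
        return 0.5  # no spread observed yet; an assumption of this sketch
    scaled = (error + 3 * sigma) / (6 * sigma)
    return min(1.0, max(0.0, scaled))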

    1. Bifet, Albert, and Ricard Gavald\u00e0. \"Adaptive learning from evolving data streams.\" In International Symposium on Intelligent Data Analysis, pp. 249-260. Springer, Berlin, Heidelberg, 2009.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingTreeClassifier/","title":"HoeffdingTreeClassifier","text":"

    Hoeffding Tree or Very Fast Decision Tree classifier.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.Agrawal(classification_function=0, seed=42)\ndataset = iter(gen.take(1000))\n\nmodel = tree.HoeffdingTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    nominal_attributes=['elevel', 'car', 'zipcode']\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 84.58%\n

    "},{"location":"api/tree/HoeffdingTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None

  The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the model on instance x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingTreeClassifier/#notes","title":"Notes","text":"

    A Hoeffding Tree 1 is an incremental, anytime decision tree induction algorithm that is capable of learning from massive data streams, assuming that the distribution generating examples does not change over time. Hoeffding trees exploit the fact that a small sample can often be enough to choose an optimal splitting attribute. This idea is supported mathematically by the Hoeffding bound, which quantifies the number of observations (in our case, examples) needed to estimate some statistics within a prescribed precision (in our case, the goodness of an attribute).

A theoretically appealing feature of Hoeffding Trees, not shared by other incremental decision tree learners, is that they come with sound performance guarantees. Using the Hoeffding bound, one can show that their output is asymptotically nearly identical to that of a non-incremental learner using infinitely many examples. The implementation is based on MOA 2.
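
The bound itself is simple to state: with probability \\(1 - \\delta\\), the true mean of a random variable with range \\(R\\) lies within \\(\\epsilon\\) of the empirical mean after \\(n\\) observations, where

\\[\\epsilon = \\sqrt{\\frac{R^2 \\ln(1/\\delta)}{2n}}\\]

A small illustration (names are ours):

import math

def hoeffding_bound(R: float, delta: float, n: int) -> float:
    return math.sqrt(R ** 2 * math.log(1 / delta) / (2 * n))

# e.g. information gain over two classes (R = log2(2) = 1), delta=1e-7, 200 samples
hoeffding_bound(R=1.0, delta=1e-7, n=200)  # ~0.2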

    1. G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data streams. In KDD\u201901, pages 97\u2013106, San Francisco, CA, 2001. ACM Press.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingTreeRegressor/","title":"HoeffdingTreeRegressor","text":"

    Hoeffding Tree regressor.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The regression model used to provide responses if leaf_prediction='model'. If not provided an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      The exponential decaying factor applied to the learning models' squared errors, that are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is going to be given to past observations. On the other hand, if its value approaches 0, the recent observed errors are going to have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    tree.HoeffdingTreeRegressor(\n        grace_period=100,\n        model_selector_decay=0.9\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.793345\n

    "},{"location":"api/tree/HoeffdingTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None

  The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the tree model on sample x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingTreeRegressor/#notes","title":"Notes","text":"

The Hoeffding Tree Regressor (HTR) is an adaptation of the incremental tree algorithm of the same name for classification. Similarly to its classification counterpart, HTR uses the Hoeffding bound to control its split decisions. Unlike the classification algorithm, HTR relies on calculating the reduction of variance in the target space to decide among the split candidates: the smaller the variance at its leaf nodes, the more homogeneous the partitions are. At its leaf nodes, HTR fits either linear models or uses the target average as the predictor.
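
A hedged sketch of this variance-reduction merit (ours, simplified): the parent's target variance minus the size-weighted variance of the candidate children.

import statistics

def variance_reduction(parent, children):
    """Parent target variance minus the size-weighted variance of the children."""
    n = len(parent)
    weighted = sum(len(c) / n * statistics.pvariance(c) for c in children)
    return statistics.pvariance(parent) - weighted

variance_reduction([1, 2, 9, 10], [[1, 2], [9, 10]])  # 16.0: a large reduction, i.e. a good split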

    "},{"location":"api/tree/SGTClassifier/","title":"SGTClassifier","text":"

Stochastic Gradient Tree 1 for binary classification.

    Binary decision tree classifier that minimizes the binary cross-entropy to guide its growth.

Stochastic Gradient Trees (SGT) directly minimize a loss function to guide tree growth and update their predictions. Thus, they differ from other incremental tree learners, which do not directly optimize the loss but rather data impurity-related heuristics.

    "},{"location":"api/tree/SGTClassifier/#parameters","title":"Parameters","text":"
    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Define the significance level of the F-tests performed to decide upon creating splits or updating predictions.

    • grace_period

      Type \u2192 int

      Default \u2192 200

      Interval between split attempts or prediction updates.

    • init_pred

      Type \u2192 float

      Default \u2192 0.0

      Initial value predicted by the tree.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth the tree might reach. If set to None, the trees will grow indefinitely.

    • lambda_value

      Type \u2192 float

      Default \u2192 0.1

      Positive float value used to impose a penalty over the tree's predictions and force them to become smaller. The greater the lambda value, the more constrained are the predictions.

    • gamma

      Type \u2192 float

      Default \u2192 1.0

      Positive float value used to impose a penalty over the tree's splits and force them to be avoided when possible. The greater the gamma value, the smaller the chance of a split occurring.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      List with identifiers of the nominal attributes. If None, all features containing numbers are assumed to be numeric.

    • feature_quantizer

      Type \u2192 tree.splitter.Quantizer | None

      Default \u2192 None

      The algorithm used to quantize numeric features. Either a static quantizer (as in the original implementation) or a dynamic quantizer can be used. The correct choice and setup of the feature quantizer is a crucial step to determine the performance of SGTs. Feature quantizers are akin to the attribute observers used in Hoeffding Trees. By default, an instance of tree.splitter.StaticQuantizer (with default parameters) is used if this parameter is not set.

    "},{"location":"api/tree/SGTClassifier/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_node_updates

    • n_nodes

    • n_observations

    • n_splits

    "},{"location":"api/tree/SGTClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Phishing()\nmodel = tree.SGTClassifier(\n    feature_quantizer=tree.splitter.StaticQuantizer(\n        n_bins=32, warm_start=10\n    )\n)\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 82.24%\n

    "},{"location":"api/tree/SGTClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/SGTRegressor/","title":"SGTRegressor","text":"

    Stochastic Gradient Tree for regression.

    Incremental decision tree regressor that minimizes the mean square error to guide its growth.

Stochastic Gradient Trees (SGT) directly minimize a loss function to guide tree growth and update their predictions. Thus, they differ from other incremental tree learners, which do not directly optimize the loss but rather a data impurity-related heuristic.

    "},{"location":"api/tree/SGTRegressor/#parameters","title":"Parameters","text":"
    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Define the significance level of the F-tests performed to decide upon creating splits or updating predictions.

    • grace_period

      Type \u2192 int

      Default \u2192 200

      Interval between split attempts or prediction updates.

    • init_pred

      Type \u2192 float

      Default \u2192 0.0

      Initial value predicted by the tree.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth the tree might reach. If set to None, the trees will grow indefinitely.

    • lambda_value

      Type \u2192 float

      Default \u2192 0.1

      Positive float value used to impose a penalty over the tree's predictions and force them to become smaller. The greater the lambda value, the more constrained are the predictions.

    • gamma

      Type \u2192 float

      Default \u2192 1.0

      Positive float value used to impose a penalty over the tree's splits and force them to be avoided when possible. The greater the gamma value, the smaller the chance of a split occurring.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      List with identifiers of the nominal attributes. If None, all features containing numbers are assumed to be numeric.

    • feature_quantizer

      Type \u2192 tree.splitter.Quantizer | None

      Default \u2192 None

      The algorithm used to quantize numeric features. Either a static quantizer (as in the original implementation) or a dynamic quantizer can be used. The correct choice and setup of the feature quantizer is a crucial step to determine the performance of SGTs. Feature quantizers are akin to the attribute observers used in Hoeffding Trees. By default, an instance of tree.splitter.StaticQuantizer (with default parameters) is used if this parameter is not set.

    "},{"location":"api/tree/SGTRegressor/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_node_updates

    • n_nodes

    • n_observations

    • n_splits

    "},{"location":"api/tree/SGTRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.TrumpApproval()\nmodel = tree.SGTRegressor(\n    delta=0.01,\n    lambda_value=0.01,\n    grace_period=20,\n    feature_quantizer=tree.splitter.DynamicQuantizer(std_prop=0.1)\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.721818\n

    "},{"location":"api/tree/SGTRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/tree/SGTRegressor/#notes","title":"Notes","text":"

    This implementation enhances the original proposal 1 by using an incremental strategy to discretize numerical features dynamically, rather than relying on a calibration set and a parameterized number of bins. The strategy used is an adaptation of the Quantization Observer (QO) 2. Different bin-size setting policies are available for selection. They directly relate to the number of split candidates the tree is going to explore, and thus how accurate its split decisions are going to be. Moreover, the number of stored bins per feature is directly related to the tree's memory usage and runtime.

    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    2. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9

    "},{"location":"api/tree/iSOUPTreeRegressor/","title":"iSOUPTreeRegressor","text":"

    Incremental Structured Output Prediction Tree (iSOUP-Tree) for multi-target regression.

    This is an implementation of the iSOUP-Tree proposed by A. Osojnik, P. Panov, and S. D\u017eeroski 1.

    "},{"location":"api/tree/iSOUPTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Allowed error in split decisions; a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

      Prediction mechanism used at leaves: 'mean' (target mean), 'model' (uses the model defined in leaf_model), 'adaptive' (dynamically chooses between 'mean' and 'model').

    • leaf_model

      Type \u2192 base.Regressor | dict | None

      Default \u2192 None

      The regression model(s) used to provide responses if leaf_prediction='model'. It can be either a regressor (in which case it is replicated to all the targets) or a dictionary whose keys are target identifiers and whose values are instances of base.Regressor. If not provided, instances of linear_model.LinearRegression with the default hyperparameters are used for all the targets. If a dictionary is passed and not all target models are specified, copies of the first model found in the dictionary will be used for the remaining targets.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      The exponential decay factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. Conversely, the closer the value is to 0, the more influence recently observed errors have on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      List of nominal attribute identifiers. If empty, all numeric attributes are assumed to be continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/iSOUPTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/iSOUPTreeRegressor/#examples","title":"Examples","text":"

    import numbers\nfrom river import compose\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import tree\n\ndataset = datasets.SolarFlare()\n\nnum = compose.SelectType(numbers.Number) | preprocessing.MinMaxScaler()\ncat = compose.SelectType(str) | preprocessing.OneHotEncoder()\n\nmodel = tree.iSOUPTreeRegressor(\n    grace_period=100,\n    leaf_prediction='model',\n    leaf_model={\n        'c-class-flares': linear_model.LinearRegression(l2=0.02),\n        'm-class-flares': linear_model.PARegressor(),\n        'x-class-flares': linear_model.LinearRegression(l2=0.1)\n    }\n)\n\npipeline = (num + cat) | model\nmetric = metrics.multioutput.MicroAverage(metrics.MAE())\n\nevaluate.progressive_val_score(dataset, pipeline, metric)\n
    MicroAverage(MAE): 0.426177\n

    "},{"location":"api/tree/iSOUPTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

    str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None. The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Incrementally train the model with one sample.

    Training tasks:

    • If the tree is empty, create a leaf node as the root.
    • If the tree is already initialized, find the corresponding leaf for the instance and update the leaf node statistics.
    • If growth is allowed and the number of instances that the leaf has observed between split attempts exceeds the grace period, attempt to split.

    Parameters

    • x
    • y
    • sample_weight \u2014 'float' \u2014 defaults to 1.0

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    1. Alja\u017e Osojnik, Pan\u010de Panov, and Sa\u0161o D\u017eeroski. \"Tree-based methods for online multi-target regression.\" Journal of Intelligent Information Systems 50.2 (2018): 315-339.\u00a0\u21a9

    "},{"location":"api/tree/base/Branch/","title":"Branch","text":"

    A generic tree branch.

    "},{"location":"api/tree/base/Branch/#parameters","title":"Parameters","text":"
    • children

      Child branches and/or leaves.

    "},{"location":"api/tree/base/Branch/#attributes","title":"Attributes","text":"
    • height

      Distance to the deepest descendant.

    • n_branches

      Number of branches, including thyself.

    • n_leaves

      Number of leaves.

    • n_nodes

      Number of descendants, including thyself.

    • repr_split

      String representation of the split.

    "},{"location":"api/tree/base/Branch/#methods","title":"Methods","text":"iter_bfs

    Iterate over nodes in breadth-first order.

    iter_branches

    Iterate over branches in depth-first order.

    iter_dfs

    Iterate over nodes in depth-first order.

    iter_edges

    Iterate over edges in depth-first order.

    iter_leaves

    Iterate over leaves from the left-most one to the right-most one.

    most_common_path

    Return a tuple with the branch index and the child node related to the most traversed path.

    Used in case the split feature is missing from an instance.

    next

    Move to the next node down the tree.

    Parameters

    • x

    to_dataframe

    Build a DataFrame containing one record for each node.

    traverse

    Return the leaf corresponding to the given input.

    Parameters

    • x
    • until_leaf \u2014 defaults to True

    walk

    Iterate over the nodes of the path induced by x.

    Parameters

    • x
    • until_leaf \u2014 defaults to True

    "},{"location":"api/tree/base/Leaf/","title":"Leaf","text":"

    A generic tree node.

    "},{"location":"api/tree/base/Leaf/#parameters","title":"Parameters","text":"
    • kwargs

      Each provided keyword argument is stored in the leaf as an attribute.

    "},{"location":"api/tree/base/Leaf/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_nodes

    "},{"location":"api/tree/base/Leaf/#methods","title":"Methods","text":"iter_branches iter_dfs iter_edges iter_leaves walk"},{"location":"api/tree/splitter/DynamicQuantizer/","title":"DynamicQuantizer","text":"

    Adapted version of the Quantizer Observer (QO)1 that is applied to Stochastic Gradient Trees (SGT).

    This feature quantizer starts by partitioning the inputs using the passed radius value. As more splits are created in the SGTs, new feature quantizers will use std * std_prop as the quantization radius. In the expression, std represents the standard deviation of the input data, which is calculated incrementally.

    "},{"location":"api/tree/splitter/DynamicQuantizer/#parameters","title":"Parameters","text":"
    • radius

      Type \u2192 float

      Default \u2192 0.5

      The initial quantization radius.

    • std_prop

      Type \u2192 float

      Default \u2192 0.25

      The proportion of the standard deviation that is going to be used to define the radius value for new quantizer instances following the initial one.

    "},{"location":"api/tree/splitter/DynamicQuantizer/#methods","title":"Methods","text":"update
    1. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9

    "},{"location":"api/tree/splitter/EBSTSplitter/","title":"EBSTSplitter","text":"

    iSOUP-Tree's Extended Binary Search Tree (E-BST).

    This class implements the Extended Binary Search Tree1 (E-BST) structure, using the variant employed by Osojnik et al.2 in the iSOUP-Tree algorithm. This structure is employed to observe the target space distribution.

    Proposed along with Fast Incremental Model Tree with Drift Detection1 (FIMT-DD), E-BST was the first attribute observer (AO) proposed for incremental Hoeffding Tree regressors. This AO works by storing all observations between splits in an extended binary search tree structure. E-BST stores the input feature realizations and statistics of the target(s) that enable calculating the split heuristic at any time. To alleviate time and memory costs, E-BST implements a memory management routine, where the worst split candidates are pruned from the binary tree.

    In this variant, only the left branch statistics are stored and the complete split-enabling statistics are calculated with an in-order traversal of the binary search tree.

    "},{"location":"api/tree/splitter/EBSTSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/EBSTSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Not implemented in regression splitters.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    remove_bad_splits

    Remove bad splits.

    Based on FIMT-DD's 1 procedure to remove bad split candidates from the E-BST. This mechanism is triggered every time a split attempt fails. The rationale is to remove points whose split merit is much worse than the best candidate overall (for which the growth decision already failed). Let \(m_1\) be the merit of the best split point and \(m_2\) be the merit of the second best split candidate. The ratio \(r = m_2/m_1\) along with the Hoeffding bound (\(\epsilon\)) are used to decide upon creating a split. A split occurs when \(r < 1 - \epsilon\). A split candidate, with merit \(m_i\), is considered bad if \(m_i / m_1 < r - 2\epsilon\). The rationale is the following: if the merit ratio for this point is smaller than the lower bound of \(r\), then the true merit of that split relative to the best one is small. Hence, this candidate can be safely removed. To avoid excessive and costly manipulations of the E-BST to update the stored statistics, only the nodes whose children are all bad split points are pruned, as defined in 1.

    Parameters

    • criterion
    • last_check_ratio \u2014 'float'
    • last_check_vr \u2014 'float'
    • last_check_e \u2014 'float'
    • pre_split_dist \u2014 'list | dict'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Ikonomovska, E., Gama, J., & D\u017eeroski, S. (2011). Learning model trees from evolving data streams. Data mining and knowledge discovery, 23(1), 128-168.\u00a0\u21a9\u21a9\u21a9\u21a9

    2. Osojnik, Alja\u017e. 2017. Structured output prediction on Data Streams (Doctoral Dissertation) \u21a9

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/","title":"ExhaustiveSplitter","text":"

    Numeric attribute observer for classification tasks that is based on a Binary Search Tree.

    This algorithm1 is also referred to as exhaustive attribute observer, since it ends up storing all the observations between split attempts2.

    This splitter cannot perform probability density estimations, so it does not work well when coupled with tree leaves using naive bayes models.

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    The underlying data structure used to monitor the input does not allow probability density estimations. Hence, it always returns zero for any given input.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Domingos, P. and Hulten, G., 2000, August. Mining high-speed data streams. In Proceedings of the sixth ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 71-80).\u00a0\u21a9

    2. Pfahringer, B., Holmes, G. and Kirkby, R., 2008, May. Handling numeric attributes in hoeffding trees. In Pacific-Asia Conference on Knowledge Discovery and Data Mining (pp. 296-307). Springer, Berlin, Heidelberg.\u00a0\u21a9

    "},{"location":"api/tree/splitter/GaussianSplitter/","title":"GaussianSplitter","text":"

    Numeric attribute observer for classification tasks that is based on Gaussian estimators.

    The distribution of each class is approximated using a Gaussian distribution. Hence, the probability density function can be easily calculated.

    "},{"location":"api/tree/splitter/GaussianSplitter/#parameters","title":"Parameters","text":"
    • n_splits

      Type \u2192 int

      Default \u2192 10

      The number of partitions to consider when querying for split candidates.

    "},{"location":"api/tree/splitter/GaussianSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/GaussianSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/HistogramSplitter/","title":"HistogramSplitter","text":"

    Numeric attribute observer for classification tasks that discretizes features using histograms.

    "},{"location":"api/tree/splitter/HistogramSplitter/#parameters","title":"Parameters","text":"
    • n_bins

      Type \u2192 int

      Default \u2192 256

      The maximum number of bins in the histogram.

    • n_splits

      Type \u2192 int

      Default \u2192 32

      The number of split points to evaluate when querying for the best split candidate.

    "},{"location":"api/tree/splitter/HistogramSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/HistogramSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/QOSplitter/","title":"QOSplitter","text":"

    Quantization observer (QO).

    This splitter utilizes a hash-based quantization algorithm to keep track of the target statistics and evaluate split candidates. QO relies on the radius parameter to define discretization intervals for each incoming feature. Split candidates are defined as the midpoints between two consecutive hash slots. Both binary splits and multi-way splits can be created by this attribute observer. This class implements the algorithm described in 1.

    The smaller the quantization radius, the more hash slots will be created to accommodate the discretized data. Hence, both the running time and memory consumption increase, but the resulting splits ought to be closer to the ones obtained by a batch exhaustive approach. On the other hand, if the radius is too large, fewer slots will be created, less memory and running time will be required, but at the cost of coarse split suggestions.

    QO assumes that all features have the same range. It is always advised to scale the features to apply this splitter. That can be done using the preprocessing module. A good \"rule of thumb\" is to scale data using preprocessing.StandardScaler and define the radius as a proportion of the features' standard deviation. For instance, the default radius value would correspond to one quarter of the normalized features' standard deviation (since the scaled data has zero mean and unit variance). If the features come from normal distributions, by following the empirical rule, roughly 32 hash slots will be created.

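    A minimal sketch of the advised setup, assuming a tree.HoeffdingTreeRegressor as the host tree (any tree accepting a splitter parameter would do):

    from river import preprocessing, tree\n\n# Standardize the features, then let the tree's splitter quantize them.\n# radius=0.25 follows the rule of thumb above: one quarter of the unit\n# standard deviation of the scaled features.\nmodel = preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n    splitter=tree.splitter.QOSplitter(radius=0.25)\n)\n
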
    "},{"location":"api/tree/splitter/QOSplitter/#parameters","title":"Parameters","text":"
    • radius

      Type \u2192 float

      Default \u2192 0.25

      The quantization radius. QO discretizes the incoming feature in intervals of equal length that are defined by this parameter.

    • allow_multiway_splits

      Default \u2192 False

      Whether or not multi-way splits may be evaluated. Numeric multi-way splits use the same quantization strategy as QO to create multiple tree branches. The same quantization radius is used, and each stored slot represents the split-enabling statistics of one branch.

    "},{"location":"api/tree/splitter/QOSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/QOSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9

    "},{"location":"api/tree/splitter/Quantizer/","title":"Quantizer","text":"

    Base class for the feature quantizers used in Stochastic Gradient Trees1.

    "},{"location":"api/tree/splitter/Quantizer/#methods","title":"Methods","text":"update
    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/splitter/Splitter/","title":"Splitter","text":"

    Base class for the tree splitters.

    Each Attribute Observer (AO) or Splitter monitors one input feature and finds the best split point for this attribute. AOs can also perform other tasks related to the monitored feature, such as estimating its probability density function (classification case).

    This class should not be instantiated, as none of its methods are implemented.

    "},{"location":"api/tree/splitter/Splitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/Splitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/StaticQuantizer/","title":"StaticQuantizer","text":"

    Quantization strategy originally used in Stochastic Gradient Trees (SGT)1.

    Firstly, a buffer of size warm_start is stored. The data stored in the buffer is then used to quantize the input feature into n_bins intervals. These intervals will be replicated to every new quantizer. Feature values lying outside of the limits defined by the initial buffer will be mapped to the head or tail of the list of intervals.

    "},{"location":"api/tree/splitter/StaticQuantizer/#parameters","title":"Parameters","text":"
    • n_bins

      Type \u2192 int

      Default \u2192 64

      The number of bins (intervals) to divide the input feature.

    • warm_start

      Type \u2192 int

      Default \u2192 100

      The number of observations used to initialize the quantization intervals.

    • buckets

      Type \u2192 list | None

      Default \u2192 None

      This parameter is only used internally by the quantizer, so it must not be set. Once the intervals are defined, new instances of this quantizer will receive the quantization information via this parameter.

    "},{"location":"api/tree/splitter/StaticQuantizer/#methods","title":"Methods","text":"update
    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/splitter/TEBSTSplitter/","title":"TEBSTSplitter","text":"

    Truncated E-BST.

    Variation of E-BST that rounds the incoming feature values before passing them to the binary search tree (BST). By doing so, the attribute observer might reduce its processing time and memory usage since small variations in the input values will end up being mapped to the same BST node.

    "},{"location":"api/tree/splitter/TEBSTSplitter/#parameters","title":"Parameters","text":"
    • digits

      Type \u2192 int

      Default \u2192 1

      The number of decimal places used to round the input feature values.

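    A hedged sketch of how the rounding is configured, again assuming a tree.HoeffdingTreeRegressor as the host tree:

    from river import tree\n\n# With digits=2, inputs such as 0.201 and 0.204 are both rounded to 0.2\n# and mapped to the same BST node, trading split precision for memory.\nmodel = tree.HoeffdingTreeRegressor(\n    splitter=tree.splitter.TEBSTSplitter(digits=2)\n)\n
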
    "},{"location":"api/tree/splitter/TEBSTSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/TEBSTSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Not implemented in regression splitters.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    remove_bad_splits

    Remove bad splits.

    Based on FIMT-DD's procedure to remove bad split candidates from the E-BST. This mechanism is triggered every time a split attempt fails. The rationale is to remove points whose split merit is much worse than the best candidate overall (for which the growth decision already failed). Let \(m_1\) be the merit of the best split point and \(m_2\) be the merit of the second best split candidate. The ratio \(r = m_2/m_1\) along with the Hoeffding bound (\(\epsilon\)) are used to decide upon creating a split. A split occurs when \(r < 1 - \epsilon\). A split candidate, with merit \(m_i\), is considered bad if \(m_i / m_1 < r - 2\epsilon\). The rationale is the following: if the merit ratio for this point is smaller than the lower bound of \(r\), then the true merit of that split relative to the best one is small. Hence, this candidate can be safely removed. To avoid excessive and costly manipulations of the E-BST to update the stored statistics, only the nodes whose children are all bad split points are pruned, as defined in the FIMT-DD paper.

    Parameters

    • criterion
    • last_check_ratio \u2014 'float'
    • last_check_vr \u2014 'float'
    • last_check_e \u2014 'float'
    • pre_split_dist \u2014 'list | dict'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/utils/Rolling/","title":"Rolling","text":"

    A generic wrapper for performing rolling computations.

    This can be wrapped around any object which implements both an update and a revert method. Inputs to update are stored in a queue. Elements of the queue are popped when the window is full.

    "},{"location":"api/utils/Rolling/#parameters","title":"Parameters","text":"
    • obj

      Type \u2192 Rollable

      An object that implements both an update method and a revert method.

    • window_size

      Type \u2192 int

      Size of the window.

    "},{"location":"api/utils/Rolling/#attributes","title":"Attributes","text":"
    • window_size
    "},{"location":"api/utils/Rolling/#examples","title":"Examples","text":"

    For instance, here is how you can compute a rolling average over a window of size 3:

    from river import stats, utils\n\nX = [1, 3, 5, 7]\nrmean = utils.Rolling(stats.Mean(), window_size=3)\n\nfor x in X:\n    print(rmean.update(x).get())\n
    1.0\n2.0\n3.0\n5.0\n

    "},{"location":"api/utils/Rolling/#methods","title":"Methods","text":"update"},{"location":"api/utils/SortedWindow/","title":"SortedWindow","text":"

    Sorted running window data structure.

    "},{"location":"api/utils/SortedWindow/#parameters","title":"Parameters","text":"
    • size

      Type \u2192 int

      Size of the window to compute the rolling quantile.

    "},{"location":"api/utils/SortedWindow/#attributes","title":"Attributes","text":"
    • size
    "},{"location":"api/utils/SortedWindow/#examples","title":"Examples","text":"

    from river import utils\n\nwindow = utils.SortedWindow(size=3)\n\nfor i in reversed(range(9)):\n    print(window.append(i))\n
    [8]\n[7, 8]\n[6, 7, 8]\n[5, 6, 7]\n[4, 5, 6]\n[3, 4, 5]\n[2, 3, 4]\n[1, 2, 3]\n[0, 1, 2]\n

    "},{"location":"api/utils/SortedWindow/#methods","title":"Methods","text":"
    1. Left sorted inserts in Python \u21a9

    "},{"location":"api/utils/TimeRolling/","title":"TimeRolling","text":"

    A generic wrapper for performing time rolling computations.

    This can be wrapped around any object which implements both an update and a revert method. Inputs to update are stored in a queue. Elements of the queue are popped when they are too old.

    "},{"location":"api/utils/TimeRolling/#parameters","title":"Parameters","text":"
    • obj

      Type \u2192 Rollable

      An object that implements both an update method and a revert method.

    • period

      Type \u2192 dt.timedelta

      A duration of time, expressed as a datetime.timedelta.

    "},{"location":"api/utils/TimeRolling/#examples","title":"Examples","text":"

    For instance, here is how you can compute a rolling average over a period of 3 days:

    import datetime as dt\nfrom river import stats, utils\n\nX = {\n    dt.datetime(2019, 1, 1): 1,\n    dt.datetime(2019, 1, 2): 5,\n    dt.datetime(2019, 1, 3): 9,\n    dt.datetime(2019, 1, 4): 13\n}\n\nrmean = utils.TimeRolling(stats.Mean(), period=dt.timedelta(days=3))\nfor t, x in X.items():\n    print(rmean.update(x, t=t).get())\n
    1.0\n3.0\n5.0\n9.0\n

    "},{"location":"api/utils/TimeRolling/#methods","title":"Methods","text":"update"},{"location":"api/utils/VectorDict/","title":"VectorDict","text":""},{"location":"api/utils/VectorDict/#methods","title":"Methods","text":"abs clear get

    Parameters

    • key
    • args
    • kwargs

    items

    keys

    max

    maximum

    Parameters

    • other

    min

    minimum

    Parameters

    • other

    pop

    Parameters

    • args
    • kwargs

    popitem

    setdefault

    Parameters

    • key
    • args
    • kwargs

    to_dict

    to_numpy

    Parameters

    • fields

    update

    Parameters

    • args
    • kwargs

    values

    with_mask

    Parameters

    • mask
    • copy \u2014 defaults to False

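    VectorDict wraps a dictionary so that it can be manipulated as a vector. A minimal, hedged sketch using only the methods listed above (assuming to_numpy orders the values according to the given fields):

    from river import utils\n\nu = utils.VectorDict({'a': 1.0, 'b': -2.0})\nu.to_dict()\n
    {'a': 1.0, 'b': -2.0}\n

    u.to_numpy(['a', 'b'])\n
    array([ 1., -2.])\n
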
    "},{"location":"api/utils/dict2numpy/","title":"dict2numpy","text":"

    Convert a dictionary containing data to a numpy array.

    There is no restriction on the type of keys in data, but values must be strictly numeric. To make sure random permutations of the features do not impact the learning algorithms, keys are first converted to strings and then sorted prior to the conversion.

    "},{"location":"api/utils/dict2numpy/#parameters","title":"Parameters","text":"
    • data

      A dictionary whose keys represent input attributes and the values represent their observed contents.

    "},{"location":"api/utils/dict2numpy/#examples","title":"Examples","text":"

    from river.utils import dict2numpy\ndict2numpy({'a': 1, 'b': 2, 3: 3})\n
    array([3, 1, 2])\n

    "},{"location":"api/utils/expand-param-grid/","title":"expand_param_grid","text":"

    Expands a grid of parameters.

    This method can be used to generate a list of model parametrizations from a dictionary where each parameter is associated with a list of possible parameters. In other words, it expands a grid of parameters.

    Typically, this method can be used to create copies of a given model with different parameter choices. The models can then be used as part of a model selection process, such as a selection.SuccessiveHalvingClassifier or a selection.EWARegressor.

    The syntax for the parameter grid is quite flexible. It allows nesting parameters and can therefore be used to generate parameters for a pipeline.

    "},{"location":"api/utils/expand-param-grid/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Estimator

    • grid

      Type \u2192 dict

      The grid of parameters to expand. The provided dictionary can be nested. The only requirement is that the values at the leaves need to be lists.

    "},{"location":"api/utils/expand-param-grid/#examples","title":"Examples","text":"

    As an initial example, we can expand a grid of parameters for a single model.

    from river import linear_model\nfrom river import optim\nfrom river import utils\n\nmodel = linear_model.LinearRegression()\n\ngrid = {'optimizer': [optim.SGD(.1), optim.SGD(.01), optim.SGD(.001)]}\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    3\n

    models[0]\n
    LinearRegression (\n  optimizer=SGD (\n    lr=Constant (\n      learning_rate=0.1\n    )\n  )\n  loss=Squared ()\n  l2=0.\n  l1=0.\n  intercept_init=0.\n  intercept_lr=Constant (\n    learning_rate=0.01\n  )\n  clip_gradient=1e+12\n  initializer=Zeros ()\n)\n

    You can expand parameters for multiple choices like so:

    grid = {\n    'optimizer': [\n        (optim.SGD, {'lr': [.1, .01, .001]}),\n        (optim.Adam, {'lr': [.1, .01, .01]})\n    ]\n}\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    6\n

    You may specify a grid of parameters for a pipeline via nesting:

    from river import feature_extraction\n\nmodel = (\n    feature_extraction.BagOfWords() |\n    linear_model.LinearRegression()\n)\n\ngrid = {\n    'BagOfWords': {\n        'strip_accents': [False, True]\n    },\n    'LinearRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01]}),\n            (optim.Adam, {'lr': [.1, .01]})\n        ]\n    }\n}\n\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    8\n

    "},{"location":"api/utils/log-method-calls/","title":"log_method_calls","text":"

    A context manager to log method calls.

    All method calls will be logged by default. This behavior can be overridden by passing filtering functions.

    "},{"location":"api/utils/log-method-calls/#parameters","title":"Parameters","text":"
    • class_condition

      Type \u2192 typing.Callable[[typing.Any], bool] | None

      Default \u2192 None

      A function which determines if a class should be logged or not.

    • method_condition

      Type \u2192 typing.Callable[[typing.Any], bool] | None

      Default \u2192 None

      A function which determines if a method should be logged or not.

    "},{"location":"api/utils/log-method-calls/#examples","title":"Examples","text":"

    import io\nimport logging\nfrom river import anomaly\nfrom river import compose\nfrom river import datasets\nfrom river import preprocessing\nfrom river import utils\n\nmodel = compose.Pipeline(\n    preprocessing.MinMaxScaler(),\n    anomaly.HalfSpaceTrees(seed=42)\n)\n\nclass_condition = lambda x: x.__class__.__name__ in ('MinMaxScaler', 'HalfSpaceTrees')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.CreditCard().take(1):\n        score = model.score_one(x)\n        model = model.learn_one(x)\n\nprint(logs.getvalue())\n
    MinMaxScaler.transform_one\nHalfSpaceTrees.score_one\nMinMaxScaler.learn_one\nMinMaxScaler.transform_one\nHalfSpaceTrees.learn_one\n

    logs.close()\n
    "},{"location":"api/utils/numpy2dict/","title":"numpy2dict","text":"

    Convert a numpy array to a dictionary.

    "},{"location":"api/utils/numpy2dict/#parameters","title":"Parameters","text":"
    • data

      Type \u2192 np.ndarray

      A one-dimensional numpy array.

    "},{"location":"api/utils/numpy2dict/#examples","title":"Examples","text":"

    import numpy as np\nfrom river.utils import numpy2dict\nnumpy2dict(np.array([1.0, 2.0, 3.0]))\n
    {0: 1.0, 1: 2.0, 2: 3.0}\n

    "},{"location":"api/utils/math/argmax/","title":"argmax","text":"

    Argmax function.

    "},{"location":"api/utils/math/argmax/#parameters","title":"Parameters","text":"
    • lst

      Type \u2192 list

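    For instance, assuming the returned value is the index of the largest element in the list:

    from river import utils\n\nutils.math.argmax([2, 9, 4])\n
    1\n
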
    "},{"location":"api/utils/math/chain-dot/","title":"chain_dot","text":"

    Returns the dot product of multiple vectors represented as dicts.

    "},{"location":"api/utils/math/chain-dot/#parameters","title":"Parameters","text":"
    • xs
    "},{"location":"api/utils/math/chain-dot/#examples","title":"Examples","text":"

    from river import utils\n\nx = {'x0': 1, 'x1': 2, 'x2': 1}\ny = {'x1': 21, 'x2': 3}\nz = {'x1': 2, 'x2': 1 / 3}\n\nutils.math.chain_dot(x, y, z)\n
    85.0\n

    "},{"location":"api/utils/math/clamp/","title":"clamp","text":"

    Clamp a number.

    This is a synonym of clipping.

    "},{"location":"api/utils/math/clamp/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

    • minimum

      Default \u2192 0.0

    • maximum

      Default \u2192 1.0

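    For example, assuming clamp(x) = min(max(x, minimum), maximum), with the default bounds [0, 1]:

    from river import utils\n\nutils.math.clamp(1.5), utils.math.clamp(0.3), utils.math.clamp(-0.2)\n
    (1.0, 0.3, 0.0)\n
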
    "},{"location":"api/utils/math/dot/","title":"dot","text":"

    Returns the dot product of two vectors represented as dicts.

    "},{"location":"api/utils/math/dot/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 dict

    • y

      Type \u2192 dict

    "},{"location":"api/utils/math/dot/#examples","title":"Examples","text":"

    from river import utils\n\nx = {'x0': 1, 'x1': 2}\ny = {'x1': 21, 'x2': 3}\n\nutils.math.dot(x, y)\n
    42\n

    "},{"location":"api/utils/math/dotvecmat/","title":"dotvecmat","text":"

    Vector times matrix from the left side, i.e. \(x^T A\).

    "},{"location":"api/utils/math/dotvecmat/#parameters","title":"Parameters","text":"
    • x

    • A

    "},{"location":"api/utils/math/dotvecmat/#examples","title":"Examples","text":"

    from river import utils\n\nx = {0: 4, 1: 5}\n\nA = {\n    (0, 0): 0, (0, 1): 1,\n    (1, 0): 2, (1, 1): 3\n}\n\nC = utils.math.dotvecmat(x, A)\nprint(C)\n
    {0: 10.0, 1: 19.0}\n

    "},{"location":"api/utils/math/log-sum-2-exp/","title":"log_sum_2_exp","text":"

    Computation of \(\log((e^a + e^b) / 2)\) in an overflow-proof way.

    "},{"location":"api/utils/math/log-sum-2-exp/#parameters","title":"Parameters","text":"
    • a

      Type \u2192 float

      First number

    • b

      Type \u2192 float

      Second number

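    A quick sanity check: for a = b = 0 the exact value is \(\log((1 + 1) / 2) = 0\).

    from river import utils\n\nutils.math.log_sum_2_exp(0.0, 0.0)\n
    0.0\n
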
    "},{"location":"api/utils/math/matmul2d/","title":"matmul2d","text":"

    Multiplication for 2D matrices.

    "},{"location":"api/utils/math/matmul2d/#parameters","title":"Parameters","text":"
    • A

    • B

    "},{"location":"api/utils/math/matmul2d/#examples","title":"Examples","text":"

    import pprint\nfrom river import utils\n\nA = {\n    (0, 0): 2, (0, 1): 0, (0, 2): 4,\n    (1, 0): 5, (1, 1): 6, (1, 2): 0\n}\n\nB = {\n    (0, 0): 1, (0, 1): 1, (0, 2): 0, (0, 3): 0,\n    (1, 0): 2, (1, 1): 0, (1, 2): 1, (1, 3): 3,\n    (2, 0): 4, (2, 1): 0, (2, 2): 0, (2, 3): 0\n}\n\nC = utils.math.matmul2d(A, B)\npprint.pprint(C)\n
    {(0, 0): 18.0,\n    (0, 1): 2.0,\n    (0, 2): 0.0,\n    (0, 3): 0.0,\n    (1, 0): 17.0,\n    (1, 1): 5.0,\n    (1, 2): 6.0,\n    (1, 3): 18.0}\n

    "},{"location":"api/utils/math/minkowski-distance/","title":"minkowski_distance","text":"

    Minkowski distance.

    "},{"location":"api/utils/math/minkowski-distance/#parameters","title":"Parameters","text":"
    • a

      Type \u2192 dict

    • b

      Type \u2192 dict

    • p

      Type \u2192 int

      Parameter for the Minkowski distance. When p=1, this is equivalent to using the Manhattan distance. When p=2, this is equivalent to using the Euclidean distance.

    "},{"location":"api/utils/math/norm/","title":"norm","text":"

    Compute the norm of a dictionary's values.

    "},{"location":"api/utils/math/norm/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 dict

    • order

      Default \u2192 None

    "},{"location":"api/utils/math/outer/","title":"outer","text":"

    Outer-product between two vectors.

    "},{"location":"api/utils/math/outer/#parameters","title":"Parameters","text":"
    • u

      Type \u2192 dict

    • v

      Type \u2192 dict

    "},{"location":"api/utils/math/outer/#examples","title":"Examples","text":"

    import pprint\nfrom river import utils\n\nu = dict(enumerate((1, 2, 3)))\nv = dict(enumerate((2, 4, 8)))\n\nuTv = utils.math.outer(u, v)\npprint.pprint(uTv)\n
    {(0, 0): 2,\n    (0, 1): 4,\n    (0, 2): 8,\n    (1, 0): 4,\n    (1, 1): 8,\n    (1, 2): 16,\n    (2, 0): 6,\n    (2, 1): 12,\n    (2, 2): 24}\n

    "},{"location":"api/utils/math/prod/","title":"prod","text":"

    Product function.

    "},{"location":"api/utils/math/prod/#parameters","title":"Parameters","text":"
    • iterable
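
    For example:

    from river import utils\n\nutils.math.prod([2, 3, 4])\n
    24\n
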
    "},{"location":"api/utils/math/sherman-morrison/","title":"sherman_morrison","text":"

    Sherman-Morrison formula.

    This is an inplace function.

    "},{"location":"api/utils/math/sherman-morrison/#parameters","title":"Parameters","text":"
    • A

      Type \u2192 np.ndarray

    • u

      Type \u2192 np.ndarray

    • v

      Type \u2192 np.ndarray

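    For reference, the identity applied is \((A + uv^\intercal)^{-1} = A^{-1} - \frac{A^{-1} u v^\intercal A^{-1}}{1 + v^\intercal A^{-1} u}\), where A is assumed to hold the current inverse and is overwritten with the updated one.
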
    1. Fast rank-one updates to matrix inverse? \u2014 Tim Vieira \u21a9

    "},{"location":"api/utils/math/sigmoid/","title":"sigmoid","text":"

    Sigmoid function.

    "},{"location":"api/utils/math/sigmoid/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

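    For reference, the sigmoid is \(\sigma(x) = 1 / (1 + e^{-x})\), which maps any real number into the interval (0, 1).
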
    "},{"location":"api/utils/math/sign/","title":"sign","text":"

    Sign function.

    "},{"location":"api/utils/math/sign/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

    "},{"location":"api/utils/math/softmax/","title":"softmax","text":"

    Normalizes a dictionary of predicted probabilities, in-place.

    "},{"location":"api/utils/math/softmax/#parameters","title":"Parameters","text":"
    • y_pred

      Type \u2192 dict

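    A sketch of the expected behavior, assuming the usual numerically stable softmax (shift by the maximum, exponentiate, then normalize):

    from river import utils\n\nutils.math.softmax({'cat': 0.0, 'dog': 1.0})\n
    {'cat': 0.2689414213699951, 'dog': 0.7310585786300049}\n
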
    "},{"location":"api/utils/math/woodbury-matrix/","title":"woodbury_matrix","text":"

    Woodbury matrix identity.

    This is an inplace function.

    "},{"location":"api/utils/math/woodbury-matrix/#parameters","title":"Parameters","text":"
    • A

      Type \u2192 np.ndarray

    • U

      Type \u2192 np.ndarray

    • V

      Type \u2192 np.ndarray

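    For reference, with \(C = I\) the identity reads \((A + UV)^{-1} = A^{-1} - A^{-1} U (I + V A^{-1} U)^{-1} V A^{-1}\); as with sherman_morrison, A is assumed to hold the current inverse and is updated in place.
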
    1. Matrix inverse mini-batch updates \u2014 Max Halford \u21a9

    "},{"location":"api/utils/norm/normalize-values-in-dict/","title":"normalize_values_in_dict","text":"

    Normalize the values in a dictionary using the given factor.

    For each element in the dictionary, applies value/factor.

    "},{"location":"api/utils/norm/normalize-values-in-dict/#parameters","title":"Parameters","text":"
    • dictionary

      Dictionary to normalize.

    • factor

      Default \u2192 None

      Normalization factor value. If not set, use the sum of values.

    • inplace

      Default \u2192 True

      If True, perform operation in-place

    • raise_error

      Default \u2192 False

      In case the normalization factor is either 0 or None: if True, raise an error; if False, return the dictionary unchanged (a copy of it, if inplace=False).

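    A minimal sketch, assuming the function is importable from river.utils.norm (as the section path suggests) and that the factor defaults to the sum of the values:

    from river.utils.norm import normalize_values_in_dict\n\nnormalize_values_in_dict({'a': 1.0, 'b': 3.0}, inplace=False)\n
    {'a': 0.25, 'b': 0.75}\n
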
    "},{"location":"api/utils/norm/scale-values-in-dict/","title":"scale_values_in_dict","text":"

    Scale the values in a dictionary.

    For each element in the dictionary, applies value * multiplier.

    "},{"location":"api/utils/norm/scale-values-in-dict/#parameters","title":"Parameters","text":"
    • dictionary

      Dictionary to scale.

    • multiplier

      Scaling value.

    • inplace

      Default \u2192 True

      If True, perform operation in-place

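    Similarly, a minimal sketch for scaling, under the same import assumption:

    from river.utils.norm import scale_values_in_dict\n\nscale_values_in_dict({'a': 1.0, 'b': 3.0}, multiplier=2, inplace=False)\n
    {'a': 2.0, 'b': 6.0}\n
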
    "},{"location":"api/utils/pretty/humanize-bytes/","title":"humanize_bytes","text":"

    Returns a human-friendly byte size.

    "},{"location":"api/utils/pretty/humanize-bytes/#parameters","title":"Parameters","text":"
    • n_bytes

      Type \u2192 int

    "},{"location":"api/utils/pretty/print-table/","title":"print_table","text":"

    Pretty-prints a table.

    "},{"location":"api/utils/pretty/print-table/#parameters","title":"Parameters","text":"
    • headers

      Type \u2192 list[str]

      The column names.

    • columns

      Type \u2192 list[list[str]]

      The column values.

    • order

      Type \u2192 list[int] | None

      Default \u2192 None

      Order in which to print the column values. Defaults to the order in which the values are given.

    "},{"location":"api/utils/random/exponential/","title":"exponential","text":"

    Sample a random value from an exponential distribution.

    "},{"location":"api/utils/random/exponential/#parameters","title":"Parameters","text":"
    • rate

      Type \u2192 float

      Default \u2192 1.0

    • rng

      Default \u2192 the standard random module

    1. Wikipedia article \u21a9

    "},{"location":"api/utils/random/poisson/","title":"poisson","text":"

    Sample a random value from a Poisson distribution.

    "},{"location":"api/utils/random/poisson/#parameters","title":"Parameters","text":"
    • rate

      Type \u2192 float

    • rng

      Default \u2192 the standard random module

    1. Wikipedia article \u21a9

    "},{"location":"benchmarks/Binary%20classification/","title":"Binary classification","text":"TableChart Model Dataset Accuracy F1 Memory in Mb Time in s ADWIN Bagging Bananas 0.625967 0.448218 0.416215 147.786 ADWIN Bagging Elec2 0.823285 0.777237 0.733135 1359.96 ADWIN Bagging Phishing 0.893515 0.879201 1.34233 106.016 ADWIN Bagging SMTP 0.999748 0.368421 1.09872 1278.41 ALMA Bananas 0.506415 0.482595 0.0029211 11.8441 ALMA Elec2 0.906402 0.889756 0.00435829 129.979 ALMA Phishing 0.8264 0.811795 0.0045805 4.33824 ALMA SMTP 0.764971 0.00178548 0.00309372 202.144 AdaBoost Bananas 0.677864 0.645041 0.468451 140.518 AdaBoost Elec2 0.875119 0.851923 14.8672 1611.8 AdaBoost Phishing 0.878303 0.863555 0.899108 53.255 AdaBoost SMTP 0.999622 0.526316 1.46643 848.066 Adaptive Random Forest Bananas 0.88696 0.871542 13.8454 307.673 Adaptive Random Forest Elec2 0.87662 0.851959 20.3554 2086.72 Adaptive Random Forest Phishing 0.908727 0.896926 3.82644 123.578 Adaptive Random Forest SMTP 0.999811 0.653846 1.32656 1588.86 Bagging Bananas 0.634082 0.459437 0.722435 194.332 Bagging Elec2 0.841939 0.804093 3.20333 2236.85 Bagging Phishing 0.893515 0.879201 1.42051 103.926 Bagging SMTP 0.999748 0.368421 1.36508 1929.49 Hoeffding Adaptive Tree Bananas 0.616531 0.42825 0.0624046 19.3398 Hoeffding Adaptive Tree Elec2 0.828672 0.795392 0.399325 382.504 Hoeffding Adaptive Tree Phishing 0.874299 0.856095 0.144985 12.0149 Hoeffding Adaptive Tree SMTP 0.999548 0.358209 0.137261 255.925 Hoeffding Tree Bananas 0.642197 0.503405 0.0602674 15.6935 Hoeffding Tree Elec2 0.796993 0.759154 1.18787 137.818 Hoeffding Tree Phishing 0.879904 0.860595 0.134742 5.7113 Hoeffding Tree SMTP 0.999622 0.419355 0.10326 230.368 Leveraging Bagging Bananas 0.828269 0.802603 3.31306 301.152 Leveraging Bagging Elec2 0.892382 0.871457 4.89464 4013.28 Leveraging Bagging Phishing 0.895116 0.878366 3.93267 254.561 Leveraging Bagging SMTP 0.999779 0.553191 1.32725 3620.36 Logistic regression Bananas 0.543019 0.195349 0.00424099 12.8476 Logistic regression Elec2 0.822163 0.777151 0.005373 151.719 Logistic regression Phishing 0.888 0.872263 0.00556469 4.74798 Logistic regression SMTP 0.999769 0.421053 0.00438309 146.608 Naive Bayes Bananas 0.61521 0.413912 0.0140247 21.4993 Naive Bayes Elec2 0.728714 0.603823 0.0510378 183.712 Naive Bayes Phishing 0.884708 0.871429 0.05723 8.45491 Naive Bayes SMTP 0.993484 0.0490798 0.0201406 263.696 Stacking Bananas 0.850349 0.829938 21.2839 421.54 Stacking Elec2 0.896797 0.877621 42.8805 3675.44 Stacking Phishing 0.899119 0.886691 4.31951 185.738 Stacking SMTP 0.99979 0.52381 1.51676 3474.07 Streaming Random Patches Bananas 0.869032 0.850817 12.1355 492.613 Streaming Random Patches Elec2 0.882249 0.859113 58.3436 5128.27 Streaming Random Patches Phishing 0.911129 0.89991 7.09044 222.948 Streaming Random Patches SMTP 0.999832 0.666667 1.41018 3718.08 Voting Bananas 0.830157 0.794989 0.122465 78.6442 Voting Elec2 0.858871 0.820255 1.31479 729.274 Voting Phishing 0.890312 0.876909 0.270739 50.7734 Voting SMTP 0.999685 0.53125 0.17401 1099.49 Vowpal Wabbit logistic regression Bananas 0.551321 0 0.000646591 14.0488 Vowpal Wabbit logistic regression Elec2 0.697439 0.459628 0.000646591 168.403 Vowpal Wabbit logistic regression Phishing 0.7736 0.669778 0.000646591 4.23902 Vowpal Wabbit logistic regression SMTP 0.999695 0.121212 0.000646591 218.564 [baseline] Last Class Bananas 0.50953 0.452957 0.000510216 2.21764 [baseline] Last Class Elec2 0.853352 0.827316 0.000510216 26.8887 [baseline] Last Class Phishing 
0.515612 0.447489 0.000510216 1.7058 [baseline] Last Class SMTP 0.999601 0.366667 0.000510216 88.1401 k-Nearest Neighbors Bananas 0.848462 0.827423 0.0418806 60.5128 k-Nearest Neighbors Elec2 0.884435 0.862904 0.0689526 421.994 k-Nearest Neighbors Phishing 0.867094 0.847985 0.0714331 14.7191 k-Nearest Neighbors SMTP 0.999853 0.740741 0.0443382 606.731 sklearn SGDClassifier Bananas 0.546604 0.205094 0.00549507 83.2497 sklearn SGDClassifier Elec2 0.819051 0.772854 0.00667286 497.166 sklearn SGDClassifier Phishing 0.8888 0.875336 0.00687218 22.1178 sklearn SGDClassifier SMTP 0.999748 0.4 0.0056448 960.794

    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.490566037735849, \"F1\": 0.325, \"Memory in Mb\": 0.0041875839233398, \"Time in s\": 0.013756 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5141509433962265, \"F1\": 0.3757575757575758, \"Memory in Mb\": 0.0041875839233398, \"Time in s\": 0.038575 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5188679245283019, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.073067 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165094339622641, \"F1\": 0.3952802359882006, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.1170739999999999 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5320754716981132, \"F1\": 0.3575129533678756, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.1709859999999999 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.2348859999999999 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5525606469002695, \"F1\": 0.2995780590717299, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.308749 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5518867924528302, \"F1\": 0.2720306513409961, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.392516 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5545073375262054, \"F1\": 0.2504409171075837, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.486674 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5613207547169812, \"F1\": 0.2339373970345963, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.590985 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5600343053173242, \"F1\": 0.216793893129771, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.705545 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5605345911949685, \"F1\": 0.2137834036568213, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.830071 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5638606676342526, \"F1\": 0.2018592297476759, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.964451 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5640161725067385, \"F1\": 0.1902377972465581, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.108635 }, { \"step\": 1590, 
\"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5641509433962264, \"F1\": 0.1798816568047337, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.262826 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5654481132075472, \"F1\": 0.1728395061728395, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.426847 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5621531631520533, \"F1\": 0.165079365079365, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.600621 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5581761006289309, \"F1\": 0.1628599801390268, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.784431 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.551142005958292, \"F1\": 0.1614100185528756, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.978045 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5490566037735849, \"F1\": 0.1643356643356643, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.1815 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5480682839173405, \"F1\": 0.1767594108019639, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.394877 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5480274442538593, \"F1\": 0.1929555895865237, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.6182670000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467596390484003, \"F1\": 0.1963636363636363, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.8514960000000005 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.547562893081761, \"F1\": 0.2132604237867396, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.0947110000000007 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5449056603773584, \"F1\": 0.2229381443298969, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.3477670000000006 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5391872278664731, \"F1\": 0.2256097560975609, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.6106280000000006 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5387840670859538, \"F1\": 0.2271662763466042, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.8834530000000007 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5407681940700808, \"F1\": 0.2233618233618233, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.166078000000001 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Logistic 
regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400130123617437, \"F1\": 0.2187845303867403, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.4584340000000005 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5433962264150943, \"F1\": 0.2176724137931034, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.760795000000001 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5447352404138771, \"F1\": 0.213459516298633, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.072864000000001 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5436320754716981, \"F1\": 0.210204081632653, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.3947970000000005 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5454545454545454, \"F1\": 0.2057942057942058, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.726813000000001 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5477247502774695, \"F1\": 0.2017629774730656, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.068550000000001 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5466307277628032, \"F1\": 0.1967526265520535, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.420104000000001 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5461215932914046, \"F1\": 0.1921641791044775, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.781176000000001 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.1882998171846435, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.151771000000001 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5476663356504469, \"F1\": 0.1844225604297224, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.532081000000001 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5478955007256894, \"F1\": 0.1806225339763262, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.921987000000001 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.176672384219554, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 8.321417 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5473999079613437, \"F1\": 0.1745698699118757, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 8.730515 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5496406109613656, \"F1\": 0.1799591002044989, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 9.149203 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.5465116279069767, \"F1\": 0.1794362842397777, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 9.577468 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5463121783876501, \"F1\": 0.1861538461538461, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.015743 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5465408805031446, \"F1\": 0.1889763779527558, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.463621 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467596390484003, \"F1\": 0.1892883345561261, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.921126 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467683661180249, \"F1\": 0.1958689458689458, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 11.388366 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5446147798742138, \"F1\": 0.1940869565217391, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 11.865171 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5427416249518675, \"F1\": 0.1924515470928255, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 12.351544 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5430188679245282, \"F1\": 0.1953488372093023, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 12.847639 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7980132450331126, \"F1\": 0.7834319526627219, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.108703 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8134657836644592, \"F1\": 0.7488855869242199, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.336789 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8024282560706402, \"F1\": 0.7300150829562596, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.6856869999999999 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8192604856512141, \"F1\": 0.7598093142647598, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 1.150847 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8289183222958058, \"F1\": 0.7613181398213735, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 1.732046 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8226637233259749, \"F1\": 0.7528205128205128, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 2.429434 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8229265216020183, \"F1\": 0.7589611504614724, \"Memory in Mb\": 0.0053730010986328, \"Time in 
s\": 3.242126 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8261589403973509, \"F1\": 0.7617246596066566, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 4.168583 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8318616629874908, \"F1\": 0.7833096254148886, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 5.210413 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8375275938189846, \"F1\": 0.7975797579757975, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 6.367211 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8377483443708609, \"F1\": 0.802008081302804, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 7.639357 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8400478292862399, \"F1\": 0.8089220964729151, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 9.025984 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8433520122261844, \"F1\": 0.8128613449639923, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 10.527624 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8420056764427626, \"F1\": 0.8118309859154929, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 12.142268 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8438557763061074, \"F1\": 0.8167846658608184, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 13.8717 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8447847682119205, \"F1\": 0.8189863234111022, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 15.715708 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8465134398130113, \"F1\": 0.8201734367868553, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 17.682764000000002 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8412435614422369, \"F1\": 0.8128388635870744, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 19.778485000000003 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397815731381434, \"F1\": 0.8070519098922625, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 22.000603000000005 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419977924944813, \"F1\": 0.8099316205271195, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 24.347790000000003 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8451592557552823, \"F1\": 0.8116368286445013, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 26.819926 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Logistic 
regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8428657435279951, \"F1\": 0.8098129706096673, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 29.418034 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8394279681351378, \"F1\": 0.805736182071528, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 32.142589 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8403237674760854, \"F1\": 0.8037087290818633, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 34.992653000000004 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8395143487858719, \"F1\": 0.800963697092482, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 37.96833 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8357530989981321, \"F1\": 0.7954965907288969, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 41.070336000000005 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8330880549423596, \"F1\": 0.7914815382258312, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 44.296124000000006 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298643960895616, \"F1\": 0.787326303340889, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 47.644214000000005 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8304788003349318, \"F1\": 0.7877834953306653, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 51.11458400000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8309050772626931, \"F1\": 0.789000091818933, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 54.70630400000001 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8277433596809799, \"F1\": 0.7844028520499109, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 58.42138600000001 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8270557395143487, \"F1\": 0.782037906451052, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 62.25746200000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8260753227640645, \"F1\": 0.7809050307575629, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 66.21287400000001 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8259316971821842, \"F1\": 0.7798127463863337, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 70.29091000000001 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8213181961526332, \"F1\": 0.7731603811353991, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 74.48714300000002 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8188925680647535, \"F1\": 0.7700393195001364, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 78.80153600000001 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169261977208997, \"F1\": 0.7682314286793308, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 83.23605800000001 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8144243057976066, \"F1\": 0.764807656911467, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 87.78961800000002 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8142299201901851, \"F1\": 0.7628098576280986, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 92.463459 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8155077262693157, \"F1\": 0.7630254483589707, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 97.25864300000002 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8151887148010553, \"F1\": 0.7614745839268963, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 102.17619800000004 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8151739724587407, \"F1\": 0.7609855564995752, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 107.21554300000004 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8162636685661482, \"F1\": 0.7631526702402223, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 112.37010800000004 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169526389725065, \"F1\": 0.7662192035369877, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 117.64186300000004 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8186902133922002, \"F1\": 0.7707480461481205, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 123.03144600000005 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8201842787215664, \"F1\": 0.7745623007039286, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 128.53961200000003 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8212155370813959, \"F1\": 0.7763841973858129, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 134.16514200000003 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8217209345106696, \"F1\": 0.7773086313370673, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 139.90254000000004 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8211920529801324, \"F1\": 0.7754328391988233, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 145.75278900000004 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8221633554083885, \"F1\": 0.7771507607192254, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 151.71939900000004 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.6896551724137931, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.005171 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.78, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.014932 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8157894736842105, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.027624 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8163265306122449, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.045128 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.808, \"F1\": 0.8032786885245902, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.065471 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8157894736842104, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.088498 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8228571428571428, \"F1\": 0.8143712574850299, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.1141639999999999 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8105263157894737, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.143232 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8177777777777778, \"F1\": 0.8038277511961723, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.175843 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.824, \"F1\": 0.811965811965812, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.212135 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8254545454545454, \"F1\": 0.8125, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.2519909999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8366666666666667, \"F1\": 0.8205128205128205, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.295434 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8430769230769231, \"F1\": 0.8222996515679442, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.3424399999999999 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8542857142857143, \"F1\": 0.8316831683168316, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.3929919999999999 }, { \"step\": 375, \"track\": \"Binary classification\", 
\"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8506666666666667, \"F1\": 0.825, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.4472389999999999 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8525, \"F1\": 0.8249258160237388, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.5051399999999999 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8588235294117647, \"F1\": 0.8285714285714286, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.5668209999999999 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8622222222222222, \"F1\": 0.8306010928961749, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.6322009999999999 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8589473684210527, \"F1\": 0.8277634961439589, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.7013439999999999 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8325358851674641, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.7743169999999998 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590476190476191, \"F1\": 0.827906976744186, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.8510559999999998 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8300220750551875, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.9315169999999998 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8626086956521739, \"F1\": 0.8329809725158562, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.015688 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8666666666666667, \"F1\": 0.8353909465020577, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.103615 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8688, \"F1\": 0.8346774193548386, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.195378 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8723076923076923, \"F1\": 0.8413001912045889, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.291006 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8725925925925926, \"F1\": 0.8447653429602888, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.390501 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8771428571428571, \"F1\": 0.8485915492957746, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.494099 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786206896551724, \"F1\": 
0.8533333333333334, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.601701 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8557692307692307, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.713159 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8812903225806452, \"F1\": 0.8566978193146417, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.828506 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88125, \"F1\": 0.8584202682563338, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.947668 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8812121212121212, \"F1\": 0.8595988538681948, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.0707090000000004 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8823529411764706, \"F1\": 0.8603351955307262, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.197589 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8857142857142857, \"F1\": 0.8637602179836512, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.3283080000000003 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8855555555555555, \"F1\": 0.8632138114209827, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.462826 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875675675675676, \"F1\": 0.867007672634271, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.6011680000000004 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8863157894736842, \"F1\": 0.8669950738916257, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.7434260000000004 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871794871794871, \"F1\": 0.8677884615384616, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.889565 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888, \"F1\": 0.8688524590163934, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.039628 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878048780487805, \"F1\": 0.8691695108077361, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.19355 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8895238095238095, \"F1\": 0.8716814159292035, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.351259 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8883720930232558, \"F1\": 0.8715203426124196, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.512691 }, { \"step\": 1100, \"track\": 
\"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8735632183908045, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.677881 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906666666666667, \"F1\": 0.8753799392097265, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.846795 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8904347826086957, \"F1\": 0.8750000000000001, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.0194600000000005 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893617021276595, \"F1\": 0.8735408560311284, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.195879000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8740458015267174, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.376099000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906122448979592, \"F1\": 0.874766355140187, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.560123000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888, \"F1\": 0.8722627737226277, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.747978000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.201733 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.541161 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.989434 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 1.548242 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 2.214419 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 2.987891 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 3.868043 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996715712033633, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 4.854806 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", 
\"Accuracy\": 0.9997080632918784, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 5.948104 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 7.14776 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999761142693355, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 8.453778 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997810474689088, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 9.865972 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978899713004, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 11.384388 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999774791682306, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 13.008482 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 14.738381 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999802942722018, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 16.574147999999997 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999814534326605, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 18.52197 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 20.589921 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998340570290676, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 22.77409 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423541776142, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 25.073776 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 27.489115 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999856685616013, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 30.019976 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998629166761864, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 32.666409 }, { \"step\": 45672, 
\"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686284813452, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 35.428179 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738833420916, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 38.305364 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787339827804, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 41.297895 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998443004223352, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 44.405542 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 47.6267 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999855038324243, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 50.959272 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997022245577158, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 54.404277 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118302171444, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 57.960817 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208355228586, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 61.629022 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999697447411583, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 65.409349 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063460171248, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 69.30026500000001 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997147361309212, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 73.30473800000001 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934664564724, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 77.42011300000001 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999701751146838, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 81.64355900000001 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9997095998008684, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 85.97794200000001 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997170459598204, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 90.42391200000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999724119810825, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 94.979812 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997308485959268, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 99.646713 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 104.425102 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997433672658838, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 109.315273 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997491998280228, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 114.3171 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997547731651778, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 119.427298 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999760104183326, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 124.64502 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997540277948592, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 129.97108 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997591522157996, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 135.407078 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997640674767015, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 140.95343100000002 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997687861271676, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 146.60779900000003 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.5242718446601942, \"Memory in Mb\": 0.0028944015502929, \"Time in s\": 0.022931 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5330188679245284, \"F1\": 0.5217391304347825, \"Memory in Mb\": 0.0028944015502929, \"Time in s\": 0.057388 }, { \"step\": 318, 
\"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5188679245283019, \"F1\": 0.5173501577287066, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.098467 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5330188679245284, \"F1\": 0.5330188679245282, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.14786 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5207547169811321, \"F1\": 0.5115384615384615, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.2044709999999999 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.5303514376996804, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.268 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.522911051212938, \"F1\": 0.512396694214876, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.339749 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5235849056603774, \"F1\": 0.5061124694376529, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.420031 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5157232704402516, \"F1\": 0.5, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.5084810000000001 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5160377358490567, \"F1\": 0.4975514201762978, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.605006 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5154373927958834, \"F1\": 0.495985727029438, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.7097530000000001 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165094339622641, \"F1\": 0.4979591836734694, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.823412 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5195936139332366, \"F1\": 0.4977238239757208, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.94616 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5195417789757413, \"F1\": 0.4968242766407903, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.077952 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5226415094339623, \"F1\": 0.4983476536682089, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.218949 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5194575471698113, \"F1\": 0.4947303161810291, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.368908 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205327413984462, \"F1\": 0.4965034965034965, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.527947 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.5193920335429769, \"F1\": 0.4964305326743548, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.696168 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.519364448857994, \"F1\": 0.4989648033126293, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.873583 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5174528301886793, \"F1\": 0.4997555012224939, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.060039 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5197663971248877, \"F1\": 0.5002337540906966, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.255465 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5175814751286449, \"F1\": 0.4975435462259938, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.460073 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5176374077112387, \"F1\": 0.4957118353344769, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.67358 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5196540880503144, \"F1\": 0.5008169934640523, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.896329 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.520377358490566, \"F1\": 0.5037094884810621, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.127217 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.521044992743106, \"F1\": 0.5041322314049587, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.366825 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5213137665967854, \"F1\": 0.5032632342277013, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.615642 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5175202156334232, \"F1\": 0.4985994397759103, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.87375 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5152895250487963, \"F1\": 0.4969615124915597, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.141236999999999 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5132075471698113, \"F1\": 0.4931237721021611, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.418262 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5130858186244674, \"F1\": 0.4927076727964489, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.703974 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5103183962264151, \"F1\": 0.490959239963224, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.999187999999999 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5091480846197828, \"F1\": 0.4891401368640284, \"Memory in Mb\": 0.0029211044311523, \"Time 
in s\": 5.303419999999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5097114317425083, \"F1\": 0.4876775877065816, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 5.616715999999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5118598382749326, \"F1\": 0.4908630868709586, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 5.938947999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510482180293501, \"F1\": 0.4893384363039912, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.270639999999999 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.50790413054564, \"F1\": 0.485881726158764, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.611250999999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.506454816285998, \"F1\": 0.4844398340248962, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.960905 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5050798258345428, \"F1\": 0.4828109201213346, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 7.319711 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5068396226415094, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 7.686943999999999 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5080533824206167, \"F1\": 0.4858104858104858, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.062541999999999 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5080862533692723, \"F1\": 0.4847058823529412, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.446741 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5063624396665204, \"F1\": 0.4837081229921982, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.840034999999999 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5051457975986278, \"F1\": 0.4829749103942652, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 9.242387 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5048218029350104, \"F1\": 0.482017543859649, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 9.653845 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5036915504511895, \"F1\": 0.4802405498281787, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.074018 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5038137294259334, \"F1\": 0.4811083123425693, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.503025 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5029481132075472, \"F1\": 0.4799506477483035, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.941391 }, { \"step\": 
5194, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5040431266846361, \"F1\": 0.4810636583400483, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 11.388345 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5064150943396226, \"F1\": 0.4825949367088608, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 11.844057 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9072847682119204, \"F1\": 0.90561797752809, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.085634 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9166666666666666, \"F1\": 0.8967874231032126, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.273724 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9175864606328182, \"F1\": 0.898458748866727, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.563875 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9268763796909492, \"F1\": 0.9098945936756204, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.954455 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9271523178807948, \"F1\": 0.9076664801343034, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 1.4444780000000002 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9269683590875644, \"F1\": 0.907437631149452, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 2.035788 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9274676758120468, \"F1\": 0.9089108910891088, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 2.724631 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9253587196467992, \"F1\": 0.9064499394777796, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 3.51195 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9250674515575178, \"F1\": 0.9098687121994394, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 4.397951 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9264900662251656, \"F1\": 0.9133714880332986, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 5.380316 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9292594822396149, \"F1\": 0.9181279758448496, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 6.461929 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9312913907284768, \"F1\": 0.9216077237905342, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 7.640261 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9313126167430804, \"F1\": 0.9217525872908404, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 8.918386 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.9289656259854936, \"F1\": 0.9190694332165634, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 10.293447 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9297277409860192, \"F1\": 0.9208978712830284, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 11.76578 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9304635761589404, \"F1\": 0.9221381121581956, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 13.33625 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9307882093234644, \"F1\": 0.922201138519924, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 15.00584 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9292985038018152, \"F1\": 0.9202572792032644, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 16.773224 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.927907517137214, \"F1\": 0.9175579618680662, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 18.640941 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9265452538631348, \"F1\": 0.915945689927376, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 20.621169 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9265216020182908, \"F1\": 0.9150771473697, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 22.710024 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9262492474413004, \"F1\": 0.915526950925181, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 24.907693 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9231692100969384, \"F1\": 0.9122114382848056, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 27.214713 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9224613686534217, \"F1\": 0.910137511992325, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 29.631112 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9216777041942604, \"F1\": 0.9086978898610396, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 32.155455 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9186194600101883, \"F1\": 0.9050096625538872, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 34.791879 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9172594227781866, \"F1\": 0.9028324531925108, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 37.534909 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9144591611479028, \"F1\": 0.8997134670487106, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 40.38939 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9142117682880414, \"F1\": 0.899213020926489, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 43.350156 }, { 
\"step\": 27180, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9137969094922738, \"F1\": 0.8990477831875565, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 46.420679 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9109876806950082, \"F1\": 0.8954587271054613, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 49.601356 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9101131346578366, \"F1\": 0.8940478126524638, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 52.888972 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9094588266773698, \"F1\": 0.8931264558411306, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 56.282618 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082911310219453, \"F1\": 0.8912666948924213, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 59.78184 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9061810154525386, \"F1\": 0.8888307611823175, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 63.389233 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9052612214863872, \"F1\": 0.8878891227051738, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 67.10073700000001 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9050176003818388, \"F1\": 0.887665819926616, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 70.91761400000001 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9050482165679098, \"F1\": 0.8877519486316657, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 74.83635300000002 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9045112356370636, \"F1\": 0.8865729846029718, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 78.86287600000001 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9047737306843268, \"F1\": 0.8860115606936415, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 82.99223100000002 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9044850051149518, \"F1\": 0.8853857087479002, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 87.22379500000002 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9042363082098182, \"F1\": 0.884573962622743, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 91.56013600000004 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9043842086349402, \"F1\": 0.8849918182098861, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 96.00151600000002 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.904901665663255, \"F1\": 0.8863302449701658, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 100.54490000000004 }, { \"step\": 40770, \"track\": 
\"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.905494235957812, \"F1\": 0.8878344153008646, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 105.19249400000002 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9060850369517228, \"F1\": 0.8891971464160343, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 109.94207100000004 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9063688882626462, \"F1\": 0.8897796699195533, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 114.79624900000005 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.906686902133922, \"F1\": 0.8902234485743656, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 119.75466700000004 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9062485921520926, \"F1\": 0.8894437656059077, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 124.81683700000002 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.906401766004415, \"F1\": 0.8897555902236091, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 129.97946500000003 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.56, \"F1\": 0.5217391304347826, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.006962 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7, \"F1\": 0.6341463414634146, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.020485 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.72, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.040554 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.73, \"F1\": 0.7157894736842104, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.063947 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.728, \"F1\": 0.7166666666666666, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.089789 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.72, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.118115 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7371428571428571, \"F1\": 0.7261904761904763, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.148737 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.7291666666666666, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.181823 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7288888888888889, \"F1\": 0.7081339712918661, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.217433 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.728, \"F1\": 0.7094017094017095, \"Memory in Mb\": 
0.0043668746948242, \"Time in s\": 0.255636 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7381818181818182, \"F1\": 0.7187499999999999, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.296124 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.717391304347826, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.339387 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7507692307692307, \"F1\": 0.7216494845360825, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.3849 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7571428571428571, \"F1\": 0.7266881028938907, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.432909 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.76, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.483666 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7625, \"F1\": 0.7293447293447294, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.537154 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7623529411764706, \"F1\": 0.7232876712328767, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.593599 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7644444444444445, \"F1\": 0.7239583333333334, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.65326 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7684210526315789, \"F1\": 0.7303921568627451, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.7162679999999999 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.77, \"F1\": 0.735632183908046, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.7828129999999999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7733333333333333, \"F1\": 0.7349665924276169, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.8526399999999998 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7727272727272727, \"F1\": 0.7368421052631579, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.92582 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7756521739130435, \"F1\": 0.7404426559356138, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.00229 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7816666666666666, \"F1\": 0.7426326129666011, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.08201 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.776, \"F1\": 0.7338403041825096, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.165182 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.7830769230769231, \"F1\": 0.7450271247739602, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.251548 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7851851851851852, \"F1\": 0.7521367521367521, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.34123 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7914285714285715, \"F1\": 0.7566666666666668, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.434087 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7931034482758621, \"F1\": 0.7626582278481012, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.530377 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7933333333333333, \"F1\": 0.7640791476407914, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.6300339999999998 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7935483870967742, \"F1\": 0.7633136094674556, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.7333069999999997 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.79625, \"F1\": 0.7687943262411348, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.840083 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7951515151515152, \"F1\": 0.7688098495212038, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.950315 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7988235294117647, \"F1\": 0.7723035952063916, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.063938 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8034285714285714, \"F1\": 0.7760416666666667, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.180811 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8022222222222222, \"F1\": 0.7752525252525253, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.30125 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8064864864864865, \"F1\": 0.781973203410475, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.424984 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8084210526315789, \"F1\": 0.7863849765258215, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.552135 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8112820512820513, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.682703 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.812, \"F1\": 0.7906458797327395, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.8167349999999995 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8156097560975609, \"F1\": 0.7956756756756757, \"Memory in Mb\": 0.0045804977416992, 
\"Time in s\": 2.95405 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8171428571428572, \"F1\": 0.7983193277310925, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.094733 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8167441860465117, \"F1\": 0.7995930824008138, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.238751 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8035714285714286, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.3859479999999995 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8222222222222222, \"F1\": 0.8073217726396917, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.536398 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8234782608695652, \"F1\": 0.8083097261567517, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.690036 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8221276595744681, \"F1\": 0.8070175438596491, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.847107 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8241666666666667, \"F1\": 0.8087035358114234, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.007475 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8253061224489796, \"F1\": 0.8099467140319715, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.171155 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8264, \"F1\": 0.8117953165654813, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.338235999999999 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.720966894377299, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 0.171046 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7769311613242249, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 0.510929 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7509196006305833, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 1.019887 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7900683131897005, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 1.685465 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7826589595375723, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 2.527149 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7699246803293046, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 3.545039 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7722393213722694, \"F1\": 0.0, \"Memory in Mb\": 
0.0030937194824218, \"Time in s\": 4.73657 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7791644771413557, \"F1\": 0.0041469194312796, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 6.098207 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.783207800548841, \"F1\": 0.004824443848834, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 7.63166 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7891224382553862, \"F1\": 0.0044653932026792, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 9.334478 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7832131084889887, \"F1\": 0.0039508340649692, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 11.212632 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7821422315641969, \"F1\": 0.0036050470658922, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 13.266531 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7877440478596548, \"F1\": 0.0034162080091098, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 15.48839 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.78188574431349, \"F1\": 0.0034299434059338, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 17.885128 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7857418111753371, \"F1\": 0.0032594524119947, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 20.449795 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7871452968996322, \"F1\": 0.0030764497769573, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 23.184561 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7866835646502427, \"F1\": 0.0028897558156335, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 26.090132000000004 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7860979739592456, \"F1\": 0.002722199537226, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 29.168944000000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7771939043615345, \"F1\": 0.0024764735017335, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 32.428274 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7831581713084603, \"F1\": 0.0024175027196905, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 35.852824 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.779496033831294, \"F1\": 0.0022644927536231, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 39.452307 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7831175655663307, \"F1\": 0.0021978021978021, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 43.218835 }, { \"step\": 43769, \"track\": \"Binary classification\", 
\"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7791130708949257, \"F1\": 0.0020644095788604, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 47.160889 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7808066211245402, \"F1\": 0.0019938191606021, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 51.272051 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7799684708355229, \"F1\": 0.001906941266209, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 55.555832 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7778810784591131, \"F1\": 0.0018165304268846, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 60.01382 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7807944570950351, \"F1\": 0.0021263400372109, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 64.63894599999999 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7777193904361535, \"F1\": 0.0020222446916076, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 69.43803199999999 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7785891604906953, \"F1\": 0.0019603038470963, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 74.40731799999999 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7758801891749869, \"F1\": 0.0026502455374542, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 79.551965 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.774159646059702, \"F1\": 0.0025454817698585, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 84.828086 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7746157383079348, \"F1\": 0.0024711098190275, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 90.195459 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7704899759550311, \"F1\": 0.0023534297778085, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 95.659582 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.771274458285679, \"F1\": 0.002292186341266, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 101.21574 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7721942797087306, \"F1\": 0.0022358124547905, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 106.861577 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7705085537455479, \"F1\": 0.0024111675126903, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 112.596175 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7685872945988553, \"F1\": 0.0023267205486162, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 118.42066 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7687999557485411, \"F1\": 
0.0022677090171271, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 124.335619 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7657140547313958, \"F1\": 0.0021806496040399, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 130.34374499999998 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7665002627430373, \"F1\": 0.0021333932180552, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 136.44014199999998 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7657101111210797, \"F1\": 0.0020744622775412, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 142.623664 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7636313590070816, \"F1\": 0.0020073956682514, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 148.896051 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7647777682728617, \"F1\": 0.0019703411801306, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 155.253168 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7652868676252806, \"F1\": 0.0019298156518206, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 161.69571599999998 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7642552694575816, \"F1\": 0.0018787699001285, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 168.224788 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7644680024674998, \"F1\": 0.001839659178931, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 174.840187 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7635312664214399, \"F1\": 0.0018876828692779, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 181.540983 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7650091960063058, \"F1\": 0.0018600325505696, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 188.324265 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7647859984771628, \"F1\": 0.001820415965048, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 195.191916 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7649710982658959, \"F1\": 0.0017854751595768, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 202.143655 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.4731182795698925, \"Memory in Mb\": 0.0054683685302734, \"Time in s\": 0.075077 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5424528301886793, \"F1\": 0.4699453551912568, \"Memory in Mb\": 0.0054683685302734, \"Time in s\": 0.21882 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.4878048780487805, \"Memory in 
Mb\": 0.0054950714111328, \"Time in s\": 0.428712 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5212264150943396, \"F1\": 0.4671916010498687, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 0.704487 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5283018867924528, \"F1\": 0.4266055045871559, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.047006 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5251572327044025, \"F1\": 0.388663967611336, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.456112 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.3636363636363636, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.9325 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5412735849056604, \"F1\": 0.3395585738539897, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 2.475946 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5450733752620545, \"F1\": 0.3154574132492113, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 3.086975 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5528301886792453, \"F1\": 0.2967359050445103, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 3.764565 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5531732418524872, \"F1\": 0.2793914246196404, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 4.50753 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.2762148337595907, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 5.315441 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5573294629898403, \"F1\": 0.261501210653753, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 6.188685 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5579514824797843, \"F1\": 0.2477064220183486, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 7.126671999999999 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5584905660377358, \"F1\": 0.2352941176470588, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 8.129404999999998 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5601415094339622, \"F1\": 0.2261410788381742, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 9.196331 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5571587125416204, \"F1\": 0.2161100196463654, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 10.327506 }, { \"step\": 1908, \"track\": 
\"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.2116991643454038, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 11.523138 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5501489572989077, \"F1\": 0.201058201058201, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 12.782968 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.1946308724832214, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 14.106907 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550763701707098, \"F1\": 0.2038216560509554, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 15.494886 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5497427101200686, \"F1\": 0.2105263157894736, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 16.946832999999998 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5484003281378179, \"F1\": 0.2118826055833929, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 18.462691 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487421383647799, \"F1\": 0.2264150943396226, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 20.042348 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5464150943396227, \"F1\": 0.2324393358876117, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 21.68579 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5399129172714079, \"F1\": 0.2305825242718446, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 23.392549 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.539832285115304, \"F1\": 0.2311733800350262, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 25.163525000000003 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5414420485175202, \"F1\": 0.2306387789711701, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 26.998099000000003 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400130123617437, \"F1\": 0.2273224043715847, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 28.897093000000005 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5433962264150943, \"F1\": 0.2284803400637619, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 30.860975000000003 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5447352404138771, \"F1\": 0.2248704663212435, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 32.894016 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": 
\"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5448113207547169, \"F1\": 0.2248995983935743, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 34.991593 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5465980560320183, \"F1\": 0.2202556538839724, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 37.153313 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5491120976692564, \"F1\": 0.216867469879518, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 39.379211000000005 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5482479784366577, \"F1\": 0.212406015037594, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 41.66962600000001 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5476939203354297, \"F1\": 0.2075298438934802, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 44.024291000000005 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5486996430392657, \"F1\": 0.2034203420342034, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 46.443344 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5491559086395233, \"F1\": 0.1992945326278659, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 48.926930000000006 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5493468795355588, \"F1\": 0.1952483801295896, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 51.474790000000006 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5485849056603773, \"F1\": 0.1910397295012679, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 54.086862 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487804878048781, \"F1\": 0.1886636326023996, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 56.763353 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509883198562444, \"F1\": 0.1936264622831787, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 59.503949000000006 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5484861781483107, \"F1\": 0.1935736677115987, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 62.30891200000001 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5493138936535163, \"F1\": 0.1977099236641221, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 65.176854 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550314465408805, \"F1\": 0.1999254009697874, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 68.08610900000001 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"sklearn 
SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550656275635767, \"F1\": 0.2000730193501277, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 71.03653600000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505820955439582, \"F1\": 0.2063098192130449, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 74.02805500000001 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5481525157232704, \"F1\": 0.2042229145032883, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 77.060803 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5463996919522526, \"F1\": 0.2024373730534867, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 80.134657 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5466037735849056, \"F1\": 0.2050942772080714, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 83.249657 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7991169977924945, \"F1\": 0.7853773584905659, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 0.581912 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8134657836644592, \"F1\": 0.7492581602373888, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 1.751174 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8002207505518764, \"F1\": 0.7256189994946943, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 3.503934 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8187086092715232, \"F1\": 0.7581891792418107, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 5.845535 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8275938189845474, \"F1\": 0.7584287039901021, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 8.772576 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8210080941869021, \"F1\": 0.7495495495495494, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 12.203207 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8221381267738883, \"F1\": 0.7573149741824441, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 16.003328 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8251931567328918, \"F1\": 0.7596281540504647, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 20.176524 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8302673534461614, \"F1\": 0.7805960684844642, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 24.718011 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8363134657836645, \"F1\": 
0.7957018873123021, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 29.627709 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8370459562512542, \"F1\": 0.8009803921568629, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 34.907269 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8392200147167035, \"F1\": 0.8078276165347406, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 40.555269 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422482594668025, \"F1\": 0.8113705583756344, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 46.57328199999999 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8409019236833807, \"F1\": 0.8104096204434422, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 52.95826299999999 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8427520235467255, \"F1\": 0.8153779697624189, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 59.70527599999999 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8438189845474614, \"F1\": 0.8177427145387216, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 66.808032 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.845214907154915, \"F1\": 0.8184310738766185, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 74.254711 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397105714986509, \"F1\": 0.8107990735379271, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 82.035365 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8384454513767864, \"F1\": 0.8052930056710774, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 90.148338 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.840728476821192, \"F1\": 0.8082392026578072, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 98.575089 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.843950383685483, \"F1\": 0.8100083189351762, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 107.31510699999998 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8412101143889223, \"F1\": 0.8075402858011553, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 116.36858499999998 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8373164411171897, \"F1\": 0.8027923211169286, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 125.73531899999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8382082413539367, \"F1\": 0.8007250481477285, \"Memory in Mb\": 
0.0066728591918945, \"Time in s\": 135.41517199999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8376158940397351, \"F1\": 0.7981560750740864, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 145.412659 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8337154015961963, \"F1\": 0.7923888270525256, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 155.725043 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8312893467418854, \"F1\": 0.7886732551589942, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 166.35011 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8278145695364238, \"F1\": 0.7841897233201582, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 177.288636 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8282332343761893, \"F1\": 0.7844073950222137, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 188.539768 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.828513612950699, \"F1\": 0.7853557448768134, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 200.1032 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8253222245958841, \"F1\": 0.7808451710890736, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 211.980006 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8246067880794702, \"F1\": 0.7784410265347915, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 224.170025 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.822830958592548, \"F1\": 0.7765638840848695, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 236.672116 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8227178288533956, \"F1\": 0.775479998355466, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 249.486866 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179754020813623, \"F1\": 0.768545994065282, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 262.615336 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8155506499877361, \"F1\": 0.7653116954045408, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 276.05616200000003 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.813614939442754, \"F1\": 0.7635840774935674, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 289.80868200000003 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8107935401417451, \"F1\": 0.7596842027595366, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 303.873891 }, { \"step\": 
35334, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8109752646176487, \"F1\": 0.758173720989174, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 318.256576 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8122792494481236, \"F1\": 0.7584590804189597, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 332.952142 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8118774565229095, \"F1\": 0.7566852367688023, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 347.962415 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.811783874697782, \"F1\": 0.7561623314721503, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 363.28641 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8127983982750655, \"F1\": 0.7582617919055984, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 378.92486 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8135661248244029, \"F1\": 0.7615350060963869, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 394.877767 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8153544272749571, \"F1\": 0.7661821344266367, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 411.1448869999999 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169450043190325, \"F1\": 0.7701762313601446, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 427.723092 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179747311070406, \"F1\": 0.7719824669784955, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 444.613374 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8185476453274466, \"F1\": 0.7730057820096079, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 461.816869 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179258458350227, \"F1\": 0.77092248830948, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 479.334408 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8190507726269316, \"F1\": 0.7728544905367584, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 497.166197 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.023761 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.78, \"F1\": 0.7659574468085107, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.064622 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8133333333333334, \"F1\": 0.8108108108108109, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.122385 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8125, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.197128 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.808, \"F1\": 0.8, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.288843 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8133333333333335, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.397551 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8228571428571428, \"F1\": 0.812121212121212, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.523492 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.6664519999999999 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8177777777777778, \"F1\": 0.8019323671497586, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.8264199999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8068669527896996, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 1.003478 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8218181818181818, \"F1\": 0.8078431372549019, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.197523 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8161764705882353, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.4085949999999998 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8430769230769231, \"F1\": 0.8222996515679442, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.6370319999999998 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8485714285714285, \"F1\": 0.8262295081967213, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.88249 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.848, \"F1\": 0.8246153846153846, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.144992 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.85, \"F1\": 0.8245614035087719, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.424587 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8541176470588235, \"F1\": 0.8258426966292134, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.721266 }, { \"step\": 450, \"track\": \"Binary 
classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577777777777778, \"F1\": 0.8279569892473118, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.035051 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8568421052631578, \"F1\": 0.8282828282828283, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.366223 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.856, \"F1\": 0.8309859154929577, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.714529 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8552380952380952, \"F1\": 0.8264840182648402, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.0798760000000005 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8336933045356371, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.4622850000000005 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8608695652173913, \"F1\": 0.8347107438016529, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.8618310000000005 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.865, \"F1\": 0.8370221327967807, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 5.278527 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8656, \"F1\": 0.8346456692913387, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 5.712664 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8692307692307693, \"F1\": 0.8417132216014899, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 6.163824 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8711111111111111, \"F1\": 0.8471001757469244, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 6.632034 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8757142857142857, \"F1\": 0.8507718696397941, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 7.117376 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8772413793103448, \"F1\": 0.8552845528455284, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 7.619913 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786666666666667, \"F1\": 0.8575899843505477, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 8.139518 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8584474885844748, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 8.676567 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8600583090379008, 
\"Memory in Mb\": 0.0068721771240234, \"Time in s\": 9.230727 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8611500701262274, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 9.801975 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8811764705882353, \"F1\": 0.8618331053351573, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 10.390364 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845714285714286, \"F1\": 0.8651535380507342, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 10.99587 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8833333333333333, \"F1\": 0.8634590377113134, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 11.618539 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8854054054054054, \"F1\": 0.8671679197994987, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 12.258516 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8852631578947369, \"F1\": 0.8685162846803377, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 12.91558 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861538461538462, \"F1\": 0.8692579505300353, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 13.589863 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.887, \"F1\": 0.8702640642939151, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 14.281225 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8868292682926829, \"F1\": 0.8705357142857143, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 14.989779 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8885714285714286, \"F1\": 0.8729641693811075, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 15.714934 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8874418604651163, \"F1\": 0.8727655099894849, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 16.456921 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.889090909090909, \"F1\": 0.8747433264887063, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 17.215539 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906666666666667, \"F1\": 0.8776119402985074, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 17.990769 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8904347826086957, \"F1\": 0.8771929824561404, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 18.782632 }, { \"step\": 1175, \"track\": \"Binary 
classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893617021276595, \"F1\": 0.875717017208413, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 19.591204 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8761726078799249, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 20.416515 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906122448979592, \"F1\": 0.8768382352941176, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 21.258769 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8888, \"F1\": 0.8753363228699551, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 22.117832 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.998949027850762, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 1.261861 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474513925381, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 3.784872 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649675950254, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 7.562933 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 12.592241 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 18.508988 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 25.182225000000003 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 32.571041 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995401996847084, \"F1\": 0.631578947368421, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 40.604957 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912886086296, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 49.282818 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321597477666, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 58.610725 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 68.59192900000001 }, { \"step\": 22836, 
\"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934664564724, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 79.219372 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997170459598204, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 90.492903 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997222430748, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 102.41066 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997197407602032, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 114.976109 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 128.185954 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997527124354734, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 142.03946100000002 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664506335028, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 156.53684400000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997787427054236, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 171.68202700000003 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 187.47483000000005 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997998148287166, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 203.911748 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999808914154684, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 220.99296 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998172222349152, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 238.718792 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 257.088535 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999831844456122, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 276.101869 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"sklearn 
SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998383119770404, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 295.75838 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998053755279188, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 316.063483 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123264019216, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 337.01301299999994 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998187979053038, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 358.6068609999999 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996671921527412, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 380.844451 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996779278897496, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 403.726308 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999687992643195, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 427.25218 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 451.4221749999999 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996754350715588, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 476.235657 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847083552286, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 501.6931359999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99966427278566, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 527.7941669999999 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996733464941556, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 554.5388179999999 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819426390464, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 581.9269989999999 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999690097955994, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 609.9586109999999 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", 
\"Accuracy\": 0.999697845507094, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 638.633216 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052151288722, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 667.951428 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99971223381628, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 697.9132639999999 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189260531107, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 728.5228309999999 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253140973582, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 759.7753099999999 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731418228528, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 791.6709739999999 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 824.2098649999999 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997316666853008, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 857.3917109999999 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 891.2164919999999 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426190654928, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 925.683851 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477666841827, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 960.794209 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.01481 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5283018867924528, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.041137 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5314465408805031, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.078177 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400943396226415, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.125909 }, { \"step\": 
530, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5547169811320755, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.184447 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.253805 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5660377358490566, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.333961 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5636792452830188, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.425093 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5649895178197065, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.527744 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5707547169811321, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.641554 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5686106346483705, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.7661 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5644654088050315, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.90156 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5682148040638607, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.047827 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5680592991913747, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.204662 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5679245283018868, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3725140000000002 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5683962264150944, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.5511630000000003 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5643729189789123, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.7405230000000005 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.560272536687631, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.940796 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.5551142005958292, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.1520810000000004 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509433962264151, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.3743700000000003 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5512129380053908, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.607588 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5506003430531733, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.8515750000000004 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.551681706316653, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.1063150000000004 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487421383647799, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.3719430000000004 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467924528301886, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.648299 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.935475 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5489168413696716, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.233696 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505390835579514, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.542693 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487963565387117, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.86295 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509433962264151, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.194204 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5517346317711503, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.536035 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5498231132075472, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.888475 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5514579759862779, \"F1\": 0.0, \"Memory in Mb\": 
0.0006465911865234, \"Time in s\": 6.251513999999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5535516093229744, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 6.625144999999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5522911051212938, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.009503999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5516247379454927, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.4043329999999985 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5525242223355431, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.8097699999999985 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5528798411122146, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 8.225969999999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5529753265602322, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 8.653192999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5523584905660377, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.090958 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5526921306948919, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.539357999999998 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5530098831985625, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.998288 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5508995173321632, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.467735 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5497427101200686, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.947871999999998 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505241090146751, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.43843 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5518867924528302, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.939415 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509835407466881, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 
12.450955999999998 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5511006289308176, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 12.972935999999995 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5514054678475163, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 13.505583999999995 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5513207547169812, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 14.048808999999997 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6799116997792495, \"F1\": 0.5482866043613708, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.144107 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7190949227373068, \"F1\": 0.4904904904904904, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.426551 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6986754966887417, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.845137 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7047461368653422, \"F1\": 0.4478844169246646, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.400054 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7024282560706402, \"F1\": 0.4118673647469459, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.091892 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7041942604856513, \"F1\": 0.4165457184325108, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.9195709999999995 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6986754966887417, \"F1\": 0.4048582995951417, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.882304 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.695364238410596, \"F1\": 0.3953997809419496, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.980137999999999 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6873926907039489, \"F1\": 0.4084474355999072, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 6.213938999999999 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6864238410596026, \"F1\": 0.4240827082911007, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.583110999999999 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.687537627934979, 
\"F1\": 0.4433321415802646, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.088205 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6938925680647535, \"F1\": 0.4717460317460317, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.727541 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6932416369502462, \"F1\": 0.4715518502267076, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 12.500958999999998 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6944970040996531, \"F1\": 0.4755717959128434, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 14.408948999999998 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6942604856512141, \"F1\": 0.4842993670100534, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 16.456863 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6935016556291391, \"F1\": 0.4860613071139387, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 18.638716 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6929619529931178, \"F1\": 0.480957084842498, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 20.95394 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6904586705911209, \"F1\": 0.4713028906577293, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 23.402754 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6921691646334379, \"F1\": 0.4645852278468223, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 25.9851 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.694205298013245, \"F1\": 0.4685911575716888, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 28.701738 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6967307894460212, \"F1\": 0.467515688445921, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 31.552559 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6958157736303432, \"F1\": 0.4737435986459509, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 34.538332 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6933966791438718, \"F1\": 0.4696604963891426, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 37.65893 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6968359087564385, \"F1\": 0.4670978172999191, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 40.913762 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit 
logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6977041942604857, \"F1\": 0.4643667370726746, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 44.302397 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6952368823229751, \"F1\": 0.4573285962657797, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 47.824759 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6978170223203336, \"F1\": 0.4597281099254495, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 51.480483 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6976505834121728, \"F1\": 0.4612250632200056, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 55.26987 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6983329527289336, \"F1\": 0.4614757439869547, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 59.192657 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6959896983075791, \"F1\": 0.4576304561864129, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 63.254198 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.695649077832372, \"F1\": 0.4559572301425662, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 67.449702 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6952262693156733, \"F1\": 0.4515207945375543, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 71.778902 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6939260151180681, \"F1\": 0.4465678863017841, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 76.241703 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6941630957018569, \"F1\": 0.4433020150091591, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 80.838056 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6917376222011984, \"F1\": 0.4368266405484819, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 85.568011 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6893549178317391, \"F1\": 0.4316805025802109, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 90.431842 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.688353916830738, \"F1\": 0.4290944860374884, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 95.429482 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6863599395840595, \"F1\": 0.4245363461948412, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 100.561249 }, { \"step\": 
35334, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6869304352748061, \"F1\": 0.4212013394725827, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 105.826946 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6911147902869758, \"F1\": 0.4267718148299877, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 111.170297 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6919722177354224, \"F1\": 0.4269831730769231, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 116.582555 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6944181646168401, \"F1\": 0.431171118285882, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 122.063741 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6937727809435803, \"F1\": 0.4308206106870229, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 127.61367 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6930814770218744, \"F1\": 0.4344288818009522, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 133.232984 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6924208977189109, \"F1\": 0.4391771019677997, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 138.92121699999998 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6933966791438718, \"F1\": 0.4472227028897733, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 144.67862699999998 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6956225635244939, \"F1\": 0.4550767290309018, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 150.505407 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6962150478292862, \"F1\": 0.4576097220511558, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 156.401784 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6963103122043519, \"F1\": 0.4557564992733731, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 162.367508 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.697439293598234, \"F1\": 0.4596278189560006, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 168.40291499999998 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.52, \"F1\": 0.3333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.006954 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.56, \"F1\": 0.2142857142857142, 
\"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.018084 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5866666666666667, \"F1\": 0.3404255319148936, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.032476 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6, \"F1\": 0.375, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.049891 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.4705882352941176, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.070336 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.62, \"F1\": 0.4466019417475728, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.0938939999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6342857142857142, \"F1\": 0.4181818181818181, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.120402 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.63, \"F1\": 0.4126984126984127, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.149914 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6488888888888888, \"F1\": 0.4316546762589928, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.1823739999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.648, \"F1\": 0.4358974358974359, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.218053 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6618181818181819, \"F1\": 0.4561403508771929, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.257041 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6733333333333333, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.299108 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.683076923076923, \"F1\": 0.4663212435233161, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.34419 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6942857142857143, \"F1\": 0.4780487804878048, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.39236 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7013333333333334, \"F1\": 0.4909090909090909, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.44361 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.705, \"F1\": 0.4913793103448276, \"Memory 
in Mb\": 0.0006465911865234, \"Time in s\": 0.497978 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7105882352941176, \"F1\": 0.4896265560165975, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.555381 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7222222222222222, \"F1\": 0.5098039215686275, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.615936 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7157894736842105, \"F1\": 0.5054945054945055, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.679555 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.718, \"F1\": 0.5252525252525252, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.74642 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7257142857142858, \"F1\": 0.5294117647058824, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.816531 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7218181818181818, \"F1\": 0.5233644859813085, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.889759 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7217391304347827, \"F1\": 0.5209580838323353, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.966149 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7283333333333334, \"F1\": 0.5275362318840581, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.04577 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7376, \"F1\": 0.5340909090909091, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.128489 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7369230769230769, \"F1\": 0.5415549597855228, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.2144389999999998 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7333333333333333, \"F1\": 0.5477386934673367, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3035119999999998 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.5560975609756097, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3960879999999998 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.743448275862069, \"F1\": 0.5753424657534246, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.4918939999999998 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.7453333333333333, \"F1\": 0.5820568927789934, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.590912 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7470967741935484, \"F1\": 0.5847457627118644, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.693051 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74625, \"F1\": 0.5915492957746479, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.7983589999999998 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7490909090909091, \"F1\": 0.602687140115163, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.906819 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7541176470588236, \"F1\": 0.6122448979591837, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.01847 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7554285714285714, \"F1\": 0.6123188405797102, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.13332 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7566666666666667, \"F1\": 0.6123893805309735, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.251249 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.76, \"F1\": 0.6237288135593221, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.372436 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7589473684210526, \"F1\": 0.6288492706645057, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.496743 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7610256410256411, \"F1\": 0.631911532385466, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.624394 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.761, \"F1\": 0.6328725038402457, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.755204 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7609756097560976, \"F1\": 0.635958395245171, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.889431 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7638095238095238, \"F1\": 0.6436781609195402, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.026807 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7665116279069767, \"F1\": 0.651872399445215, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.1673590000000003 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Vowpal 
Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.77, \"F1\": 0.6594885598923284, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.311033 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.768, \"F1\": 0.6597131681877444, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.457802 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7695652173913043, \"F1\": 0.6615581098339719, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.607738 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7702127659574468, \"F1\": 0.6633416458852868, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.760779 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7741666666666667, \"F1\": 0.6691086691086692, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.917058 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7771428571428571, \"F1\": 0.6746126340882003, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.076379 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7736, \"F1\": 0.6697782963827306, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.239016 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.209651 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.63219 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.262199 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.097272 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.138863 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.386558 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.839991 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992774566473988, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.498149999999999 }, { \"step\": 
17127, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999299351900508, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.361126 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993694167104572, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.45241 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999426742464052, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 13.765639999999998 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474513925381, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 16.300417999999997 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999514935931121, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 19.05618 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995120486449968, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 22.032826 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995445787353302, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 25.228375 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999573042564372, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 28.637878 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995981577076444, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 32.260672 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996204822794418, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 36.096896 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996404568963132, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 40.148391 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996584340514976, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 44.38058100000001 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996746990966644, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 48.731359000000005 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996894855013616, \"F1\": 
0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 53.205186000000005 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999702986131737, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 57.777449 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997153617095814, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 62.448002 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999726747241198, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 67.217049 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 72.084119 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997080632918784, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 77.049615 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997184896028828, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 82.113263 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997281968579556, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 87.275593 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995796111403048, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 92.536371 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995931720712626, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 97.895723 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058854440356, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 103.35374 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999585980668482, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 108.910478 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995981577076444, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 114.565649 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996096389159972, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 120.319466 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic 
regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912886086296, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 126.171806 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996023348624504, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 132.122918 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996127997344912, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 138.18042 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996227279464274, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 144.337343 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321597477666, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 150.592744 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996411314612358, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 156.94675700000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649675950254, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 163.39908200000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996578230211784, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 169.949778 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 176.598846 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996730308869036, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 183.346496 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996801389111014, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 190.192536 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996757639114052, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 197.137274 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996825188299177, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 204.180628 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996889980374704, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 211.32291 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.999695218076721, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 218.563617 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5333333333333333, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.027532 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5592417061611374, \"F1\": 0.5026737967914437, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.072437 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.555205047318612, \"F1\": 0.5154639175257733, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.134216 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5626477541371159, \"F1\": 0.5066666666666667, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.212262 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5689981096408318, \"F1\": 0.4818181818181818, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.306946 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5716535433070866, \"F1\": 0.4645669291338582, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.418348 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5870445344129555, \"F1\": 0.4555160142348755, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.54645 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5962219598583235, \"F1\": 0.4554140127388535, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.690987 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002098635886673, \"F1\": 0.4454148471615721, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.852195 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6090651558073654, \"F1\": 0.4405405405405405, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.029786 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6068669527896996, \"F1\": 0.4260651629072681, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.2235980000000002 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6136900078678206, \"F1\": 0.433679354094579, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.4337380000000002 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143790849673203, \"F1\": 0.419672131147541, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.660063 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6142953472690492, \"F1\": 0.4127310061601643, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.902843 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", 
\"Accuracy\": 0.6135934550031467, \"F1\": 0.4061895551257253, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.161936 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6141592920353982, \"F1\": 0.4010989010989011, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.437209 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.614658523042754, \"F1\": 0.4037800687285223, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.728596 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6151022548505506, \"F1\": 0.4080645161290322, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.0363329999999995 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6100347739692003, \"F1\": 0.4048521607278241, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.360227 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608305804624823, \"F1\": 0.4071428571428571, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.700257 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089887640449438, \"F1\": 0.4089673913043478, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.056792 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6096096096096096, \"F1\": 0.4098573281452659, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.429501999999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6101764464505539, \"F1\": 0.4084682440846824, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.818382999999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6114825009830909, \"F1\": 0.4153846153846153, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 5.223696999999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6100415251038127, \"F1\": 0.41273450824332, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 5.645150999999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6076225045372051, \"F1\": 0.4070213933077345, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 6.082884999999998 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085284865431667, \"F1\": 0.4092827004219409, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 6.537074999999998 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6083586113919784, \"F1\": 0.4065372829417773, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.007774999999998 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.60624796615685, \"F1\": 0.4062806673209028, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.494666999999998 }, { \"step\": 3180, \"track\": 
\"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6071091538219566, \"F1\": 0.4077761972498815, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.997970999999998 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6063926940639269, \"F1\": 0.4049700874367234, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 8.517511999999998 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6048363314656443, \"F1\": 0.4060283687943262, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 9.053437 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6065198741778668, \"F1\": 0.4053586862575626, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 9.605597 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6086594504579517, \"F1\": 0.4090528080469404, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 10.173998 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085198166621731, \"F1\": 0.4078303425774878, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 10.759293 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6070773263433814, \"F1\": 0.4049225883287018, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 11.360927 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6067329762815609, \"F1\": 0.4027885360185902, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 11.978992 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6088899925502855, \"F1\": 0.405436013590034, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 12.613729 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6106944108395839, \"F1\": 0.4078027235921972, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 13.264737 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.611936777541873, \"F1\": 0.4118698605648909, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 13.931975 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6131185270425776, \"F1\": 0.4128536500174642, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 14.615508 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6137946528869916, \"F1\": 0.413510747185261, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 15.315549 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6122448979591837, \"F1\": 0.4115884115884116, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 16.031772 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6126956894702981, \"F1\": 0.4124918672739102, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 16.764316 }, { 
\"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143845669951772, \"F1\": 0.4130226619853175, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 17.512985 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6153846153846154, \"F1\": 0.4131455399061033, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 18.277781 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6163420999799237, \"F1\": 0.4168446750076289, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 19.058798 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6150973068606251, \"F1\": 0.4141232794733692, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 19.855913 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6146735990756788, \"F1\": 0.4133685136323659, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 20.669466 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6152104170598226, \"F1\": 0.4139120436907157, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 21.499334 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8187845303867404, \"F1\": 0.8284518828451883, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 0.17926 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8023191606847045, \"F1\": 0.7475317348377998, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 0.525852 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.784688995215311, \"F1\": 0.706177800100452, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 1.0396 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8032017664918576, \"F1\": 0.7356321839080461, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 1.7208420000000002 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7979686465003312, \"F1\": 0.7073872721458268, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 2.5693340000000005 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7937442502299908, \"F1\": 0.6972724817715366, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 3.5852350000000004 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7982967986122063, \"F1\": 0.7065840789171829, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 4.768595 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.790396025941769, \"F1\": 0.6875128574367414, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 6.119432000000001 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7841285416411137, \"F1\": 0.6888260254596887, \"Memory in Mb\": 0.0510377883911132, \"Time 
in s\": 7.637661 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7897118887294403, \"F1\": 0.7086710506193606, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 9.323211 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.793176116407426, \"F1\": 0.7240594457089301, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 11.17581 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7960629196946003, \"F1\": 0.7361656551231703, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 13.198889 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.792137216608644, \"F1\": 0.7295027624309391, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 15.38848 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7820704880548766, \"F1\": 0.7260111022997621, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 17.743988 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7858562072264331, \"F1\": 0.7383564107174968, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 20.265390000000004 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7866850638151086, \"F1\": 0.7435727317963178, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 22.952880000000004 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.785728199467567, \"F1\": 0.738593155893536, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 25.806911000000003 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7806463481940271, \"F1\": 0.7274666666666666, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 28.828021000000003 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7788880497298554, \"F1\": 0.7181158346911569, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 32.016234000000004 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7728903361112645, \"F1\": 0.7138983522213725, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 35.376132000000005 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7701445466491459, \"F1\": 0.7094931242941608, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 38.860646 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7628317696051378, \"F1\": 0.702236220472441, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 42.445598 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7537553390603254, \"F1\": 0.6903626817934946, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 46.130991 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7508163546888654, \"F1\": 0.6836389115964032, 
\"Memory in Mb\": 0.0510377883911132, \"Time in s\": 49.916905 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7509823833281822, \"F1\": 0.6798001589644601, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 53.803318 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7457015495648482, \"F1\": 0.668217569513681, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 57.790137 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7466170638976329, \"F1\": 0.665839982747466, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 61.87940199999999 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7447865336854969, \"F1\": 0.6611180904522613, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 66.06947 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7448711605069843, \"F1\": 0.6581322996888865, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 70.360011 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.741123661650539, \"F1\": 0.650402464473815, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 74.751133 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7390065871461634, \"F1\": 0.6440019426906265, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 79.242684 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7358145631402849, \"F1\": 0.6343280019097637, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 83.83464099999999 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7320466936481921, \"F1\": 0.6243023964732918, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 88.52693 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7297990455475116, \"F1\": 0.6158319870759289, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 93.319472 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7256930209088902, \"F1\": 0.6059617649723658, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 98.21253099999998 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7215391690939752, \"F1\": 0.596427301813011, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 103.20618199999998 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7176695205990274, \"F1\": 0.5867248908296943, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 108.30076099999998 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7142359194818021, \"F1\": 0.5779493779493778, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 113.49672599999998 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.7138369229898395, \"F1\": 0.5724554949469323, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 118.79330799999998 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7174866856149452, \"F1\": 0.5752924583091347, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 124.19030599999998 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7169740207295733, \"F1\": 0.5716148486206756, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 129.68778099999997 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7183516858952459, \"F1\": 0.573859795618116, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 135.28579999999997 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7206407064198989, \"F1\": 0.5799529121154812, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 140.98460499999996 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7217720693374808, \"F1\": 0.5866964784795975, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 146.78445599999995 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7228776766660944, \"F1\": 0.5923065819861432, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 152.68634499999996 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.724127174565087, \"F1\": 0.5973170817134251, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 158.68998499999995 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7260280406754186, \"F1\": 0.6013259517462921, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 164.79474299999995 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7277117299422816, \"F1\": 0.6045222270465248, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 170.99996699999994 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7273894532921857, \"F1\": 0.6015933631814591, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 177.30586899999994 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7287136581381487, \"F1\": 0.6038234630387828, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 183.71242299999997 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.01864 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7636363636363637, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.044672 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.8048780487804877, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.0769 }, { \"step\": 100, 
\"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.819047619047619, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.1152479999999999 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8217054263565893, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.1596739999999999 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.830188679245283, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.2100959999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8323699421965318, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.266544 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.83248730964467, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.329068 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8240740740740741, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.397573 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.825, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.472283 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8244274809160306, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.553059 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8285714285714285, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.639906 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8299319727891157, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.732828 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8322580645161292, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.831756 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8318042813455658, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.937046 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8313953488372093, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.048453 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8291316526610645, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.165941 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8310991957104559, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.289511 }, { \"step\": 475, 
\"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.8291457286432161, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.419108 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577154308617234, \"F1\": 0.8337236533957845, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.554885 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587786259541985, \"F1\": 0.8310502283105022, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.696767 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.8311688311688311, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.844693 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606271777003485, \"F1\": 0.8340248962655602, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.998693 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8647746243739566, \"F1\": 0.8363636363636363, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.1587810000000003 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8356435643564357, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.3249540000000004 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 0.8426966292134833, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.4971910000000004 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.870919881305638, \"F1\": 0.8465608465608465, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.675498 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8755364806866953, \"F1\": 0.8502581755593803, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.85988 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8562091503267973, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.050473 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798397863818425, \"F1\": 0.8584905660377359, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.247084 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798449612403101, \"F1\": 0.8580152671755725, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.4497720000000003 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798498122653317, \"F1\": 0.8596491228070174, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.658789000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798543689320388, \"F1\": 0.860759493670886, \"Memory in Mb\": 0.057229995727539, \"Time in 
s\": 3.8738800000000007 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8602739726027396, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.094979 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832951945080092, \"F1\": 0.8636363636363635, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.322174 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8809788654060067, \"F1\": 0.8608582574772432, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.555426000000001 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8820346320346321, \"F1\": 0.8635794743429286, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.7947690000000005 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819810326659642, \"F1\": 0.8650602409638554, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.040106000000001 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8829568788501027, \"F1\": 0.8661971830985915, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.291615 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8808808808808809, \"F1\": 0.8643101482326111, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.549186000000001 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880859375, \"F1\": 0.8647450110864746, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.812868000000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882745471877979, \"F1\": 0.8673139158576052, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.082483000000001 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8817504655493482, \"F1\": 0.8672936259143157, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.358049000000001 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8835304822565969, \"F1\": 0.8693877551020409, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.639582000000001 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861209964412812, \"F1\": 0.8735177865612648, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.927116000000001 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8859878154917319, \"F1\": 0.8731848983543079, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.220572000000001 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850085178875639, \"F1\": 0.8717948717948718, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.520004000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8865721434528774, \"F1\": 0.8731343283582089, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.825445000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.886437908496732, \"F1\": 0.8728270814272644, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 8.137127000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8847077662129704, \"F1\": 0.8714285714285714, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 8.454914 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 0.257042 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 0.764431 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 1.522309 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 2.530537 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 3.789363 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 5.298413 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 7.057599 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372397030808, \"F1\": 0.7777777777777778, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 9.078055 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664369963798, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 11.376218 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997897945241474, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 13.952029 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998089050257978, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 16.806125 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248303043572, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 19.939484 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999838305441022, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 23.347474 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", 
\"dataset\": \"SMTP\", \"Accuracy\": 0.9998498554859052, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 26.939926 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999859865470852, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 30.696722 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686241665844, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 34.617556 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998763523956724, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 38.704559 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832219075702, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 42.956191 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998893682929528, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 47.37231800000001 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998949000236474, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 51.95229900000001 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998999049096642, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 56.69619700000001 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904454795175, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 61.60400300000001 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999908609029428, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 66.676032 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124170699132, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 71.913656 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999159204607558, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 77.315763 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999191543545486, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 82.88181300000001 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999026858699884, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 88.61249600000001 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999061614398588, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 94.508066 }, { \"step\": 55187, \"track\": \"Binary 
classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998912767730946, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 100.56946700000002 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869221741492, \"F1\": 0.4444444444444444, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 106.795227 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9988473013289938, \"F1\": 0.2916666666666666, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 113.185031 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9986369981115034, \"F1\": 0.2522522522522523, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 119.73505000000002 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9979139463040224, \"F1\": 0.1761006289308176, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 126.446401 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9979443903494536, \"F1\": 0.1739130434782608, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 133.31736600000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9977478830100296, \"F1\": 0.1573033707865168, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 140.34740000000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9967302611411972, \"F1\": 0.125, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 147.535714 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9964777730436016, \"F1\": 0.1142857142857143, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 154.879523 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9964045192427364, \"F1\": 0.1095890410958904, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 162.37251700000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9958230031260106, \"F1\": 0.0935672514619883, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 170.01463900000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9956515456062218, \"F1\": 0.0881542699724517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 177.804854 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9951936633257288, \"F1\": 0.0786240786240786, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 185.743198 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9946700031279324, \"F1\": 0.0698689956331877, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 193.829526 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9945862052109302, \"F1\": 0.0673684210526315, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 202.057449 }, { 
\"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9945539883675102, \"F1\": 0.0655737704918032, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 210.444774 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9939860335847912, \"F1\": 0.0585009140767824, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 218.97044600000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9938540274398254, \"F1\": 0.0561403508771929, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 227.63481900000005 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9938618067978532, \"F1\": 0.0550774526678141, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 236.43819800000009 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9939677917300724, \"F1\": 0.0548885077186964, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 245.38462600000005 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.993543958990198, \"F1\": 0.0504731861198738, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 254.47095700000008 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.993483904192372, \"F1\": 0.0490797546012269, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 263.6963870000001 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4952380952380952, \"F1\": 0.208955223880597, \"Memory in Mb\": 0.0192947387695312, \"Time in s\": 0.019877 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5213270142180095, \"F1\": 0.3129251700680272, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.0520889999999999 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5299684542586751, \"F1\": 0.4063745019920318, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.095483 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5437352245862884, \"F1\": 0.4238805970149253, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.150343 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.553875236294896, \"F1\": 0.4099999999999999, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.216684 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5590551181102362, \"F1\": 0.4017094017094017, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.295132 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5762483130904184, \"F1\": 0.3984674329501916, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.3850179999999999 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5867768595041323, \"F1\": 
0.4047619047619047, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.486959 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5918153200419727, \"F1\": 0.3987635239567234, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.601178 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6015108593012276, \"F1\": 0.3971428571428571, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.727681 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6, \"F1\": 0.3852242744063324, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.865767 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6073957513768686, \"F1\": 0.3966142684401451, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.0159 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085693536673928, \"F1\": 0.384, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.177562 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089008766014835, \"F1\": 0.3790149892933619, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.351249 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085588420390182, \"F1\": 0.3742454728370221, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.5365639999999998 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6094395280235988, \"F1\": 0.370722433460076, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.73385 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6102165463631316, \"F1\": 0.3754448398576512, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.943126 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.610907184058731, \"F1\": 0.3816666666666667, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.16423 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6060606060606061, \"F1\": 0.3799843627834245, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.39723 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6045304388862671, \"F1\": 0.3838235294117647, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.641805 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6053932584269663, \"F1\": 0.3868715083798882, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.898512 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6061776061776062, \"F1\": 0.388814913448735, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.166739 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.606893721789085, 
\"F1\": 0.388250319284802, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.447054 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608336610302792, \"F1\": 0.3963636363636363, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.73904 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6070215175537939, \"F1\": 0.3944153577661431, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.043011 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6047186932849364, \"F1\": 0.3892316320807628, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.35852 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6057322614470465, \"F1\": 0.3922413793103448, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.686463999999999 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6056622851365016, \"F1\": 0.3899895724713243, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.02596 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6036446469248291, \"F1\": 0.3903903903903904, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.377489 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6045926391947153, \"F1\": 0.3924601256645723, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.740578999999999 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6039573820395738, \"F1\": 0.3900609470229723, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 6.115373999999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6024771453848422, \"F1\": 0.391696750902527, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 6.501596999999999 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030883614526737, \"F1\": 0.3933566433566433, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 6.903521 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6069941715237303, \"F1\": 0.4035383319292334, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 7.317575 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6079805877595039, \"F1\": 0.4079804560260586, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 7.744409999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6107470511140236, \"F1\": 0.4146629877808436, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 8.183644999999999 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6123437898495282, \"F1\": 0.4180704441041348, \"Memory in Mb\": 0.0446500778198242, \"Time in s\": 8.636627999999998 }, { \"step\": 4028, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143531164638689, \"F1\": 0.4246017043349389, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 9.102372 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.617227195741592, \"F1\": 0.4321608040201005, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 9.580540999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6218447747110167, \"F1\": 0.4439819632327437, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 10.071727 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6239355581127733, \"F1\": 0.4513096037609133, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 10.575599 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6259267580319029, \"F1\": 0.4567699836867862, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 11.092444999999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6276058810621022, \"F1\": 0.4638230647709321, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 11.621769 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6283508470941453, \"F1\": 0.4695439240893787, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 12.163799 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6288530090165653, \"F1\": 0.471641791044776, \"Memory in Mb\": 0.0602216720581054, \"Time in s\": 12.720221 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6311794871794871, \"F1\": 0.475801749271137, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 13.289238 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6336077092953222, \"F1\": 0.484026010743568, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 13.87141 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6361313151169649, \"F1\": 0.4905037159372419, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 14.466033 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6383593298671288, \"F1\": 0.495703544575725, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 15.073179 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6421966408756369, \"F1\": 0.5034049240440022, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 15.693544 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530386740331491, \"F1\": 0.8513966480446927, \"Memory in Mb\": 0.178506851196289, \"Time in s\": 0.136823 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8663721700717836, \"F1\": 0.8393094289508632, \"Memory in Mb\": 0.2121829986572265, 
\"Time in s\": 0.372341 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8369525211630475, \"F1\": 0.810926163038839, \"Memory in Mb\": 0.2367534637451172, \"Time in s\": 0.715196 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8459839911675407, \"F1\": 0.8219527760051053, \"Memory in Mb\": 0.2367534637451172, \"Time in s\": 1.157087 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8511812762199161, \"F1\": 0.8165487207403377, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 1.692418 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404783808647655, \"F1\": 0.8027303754266211, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 2.325246 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8334647531935025, \"F1\": 0.7973128598848368, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 3.058544 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8330343590451221, \"F1\": 0.7918100481761873, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 3.886934 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8344167790997179, \"F1\": 0.8018203170874927, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 4.816345 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8403797328623468, \"F1\": 0.8133711925658234, \"Memory in Mb\": 0.3037357330322265, \"Time in s\": 5.843331 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8398394380331159, \"F1\": 0.8174748398902103, \"Memory in Mb\": 0.3038501739501953, \"Time in s\": 6.967896 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.840493054916751, \"F1\": 0.8203108808290155, \"Memory in Mb\": 0.3038501739501953, \"Time in s\": 8.190139 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404517279442982, \"F1\": 0.8186818488854579, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 9.510034 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397066939998423, \"F1\": 0.8187572434697333, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 10.932755 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422253293104717, \"F1\": 0.8231023102310231, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 12.458737 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8440841669541221, \"F1\": 0.8261270964763809, \"Memory in Mb\": 0.3890361785888672, \"Time in s\": 14.085848 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8445555483410169, \"F1\": 0.8248207229620957, \"Memory in 
Mb\": 0.3890628814697265, \"Time in s\": 15.806373999999998 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8382289814190225, \"F1\": 0.8146430578976953, \"Memory in Mb\": 0.4148235321044922, \"Time in s\": 17.623171 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8344855632370882, \"F1\": 0.8052764677739047, \"Memory in Mb\": 0.4148235321044922, \"Time in s\": 19.541046 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8333793255698438, \"F1\": 0.8031044153133764, \"Memory in Mb\": 0.4155101776123047, \"Time in s\": 21.569665 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8341655716162943, \"F1\": 0.8009086893418313, \"Memory in Mb\": 0.4168033599853515, \"Time in s\": 23.708109 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8308163162912047, \"F1\": 0.7980112615310889, \"Memory in Mb\": 0.5072460174560547, \"Time in s\": 25.964372999999995 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8293900273551855, \"F1\": 0.7961699443839229, \"Memory in Mb\": 0.5655117034912109, \"Time in s\": 28.344831 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298302902083429, \"F1\": 0.7941012799109627, \"Memory in Mb\": 0.6239414215087891, \"Time in s\": 30.844750999999995 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8288224645679722, \"F1\": 0.7904437597967678, \"Memory in Mb\": 0.6005496978759766, \"Time in s\": 33.457669 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8244109530885162, \"F1\": 0.7830920914621354, \"Memory in Mb\": 0.6264286041259766, \"Time in s\": 36.193139 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8225747107640734, \"F1\": 0.7806973218797373, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 39.050648 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8180707218039185, \"F1\": 0.7764375333042677, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 42.027952 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8183306055646481, \"F1\": 0.7761572011443043, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 45.127396 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8178005077449502, \"F1\": 0.7755416553349651, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 48.341337 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8154174826419797, \"F1\": 0.771830985915493, \"Memory in Mb\": 0.6266345977783203, \"Time in s\": 51.670155 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.81342485598979, \"F1\": 0.7672447179310641, \"Memory in Mb\": 0.6266345977783203, \"Time in s\": 55.116457 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8088771448640332, \"F1\": 0.763747622591582, \"Memory in Mb\": 0.6848773956298828, \"Time in s\": 58.686244 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8062526377300913, \"F1\": 0.7606673083092718, \"Memory in Mb\": 0.6848773956298828, \"Time in s\": 62.38209 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8052603361821565, \"F1\": 0.759212322090076, \"Memory in Mb\": 0.7534084320068359, \"Time in s\": 66.209893 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8025755020695998, \"F1\": 0.7564951026736755, \"Memory in Mb\": 0.7792224884033203, \"Time in s\": 70.16157899999999 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8007517675487008, \"F1\": 0.7563742476746306, \"Memory in Mb\": 0.8375339508056641, \"Time in s\": 74.24016799999998 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7980945188369594, \"F1\": 0.7537464130088213, \"Memory in Mb\": 0.8621463775634766, \"Time in s\": 78.43750399999999 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.797186765912886, \"F1\": 0.7523842432619212, \"Memory in Mb\": 0.9281406402587892, \"Time in s\": 82.758889 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7972350230414746, \"F1\": 0.7516392888528357, \"Memory in Mb\": 0.9552211761474608, \"Time in s\": 87.200985 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7969847893390766, \"F1\": 0.7511960143851661, \"Memory in Mb\": 1.0704402923583984, \"Time in s\": 91.761102 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7938293343144727, \"F1\": 0.748388338304628, \"Memory in Mb\": 1.0715465545654297, \"Time in s\": 96.434966 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7918730908437508, \"F1\": 0.7483706784184718, \"Memory in Mb\": 1.0715465545654297, \"Time in s\": 101.219833 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7913854953214761, \"F1\": 0.7506596306068601, \"Memory in Mb\": 1.1051769256591797, \"Time in s\": 106.118884 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7935686428413746, \"F1\": 0.7554199360650974, \"Memory in Mb\": 1.1051769256591797, \"Time in s\": 111.1286 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7953209358128375, \"F1\": 0.7590667721161451, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 116.244938 }, { \"step\": 42582, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7966933608886593, \"F1\": 0.7607572198424761, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 121.471609 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7973647296893325, \"F1\": 0.7615800865800865, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 126.806064 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7960714527065078, \"F1\": 0.7581933278132429, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 132.253655 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7969933111106206, \"F1\": 0.7591535278403435, \"Memory in Mb\": 1.1878719329833984, \"Time in s\": 137.81786 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.6428571428571429, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.016417 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7346938775510203, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.037579 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.062555 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.8080808080808081, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.093468 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8130081300813008, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.128067 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8235294117647058, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.166217 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8263473053892215, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.207895 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.8272251308900525, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.254237 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8190476190476189, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.304152 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.8205128205128206, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.3576809999999999 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8203125000000001, \"Memory in Mb\": 0.0693740844726562, \"Time in 
s\": 0.4147329999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8248175182481753, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.4754899999999999 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8263888888888888, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.539997 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.608045 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8286604361370716, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.679831 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8284023668639053, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.75623 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8262108262108262, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.836328 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8283378746594006, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.920157 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.826530612244898, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.007627 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577154308617234, \"F1\": 0.8313539192399049, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.098858 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587786259541985, \"F1\": 0.8287037037037036, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.193786 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.292398 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606271777003485, \"F1\": 0.8319327731092437, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.395047 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8647746243739566, \"F1\": 0.834355828220859, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.502529 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8336673346693387, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.61388 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 
0.8409090909090909, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.728993 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.870919881305638, \"F1\": 0.8449197860962566, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.847898 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8755364806866953, \"F1\": 0.8486956521739131, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.970613 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8547854785478548, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.097195 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798397863818425, \"F1\": 0.8571428571428571, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.227565 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798449612403101, \"F1\": 0.8567026194144837, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.361715 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798498122653317, \"F1\": 0.8584070796460177, \"Memory in Mb\": 0.0058956146240234, \"Time in s\": 2.500804 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786407766990292, \"F1\": 0.8575498575498576, \"Memory in Mb\": 0.1346960067749023, \"Time in s\": 2.646368 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8579387186629527, \"Memory in Mb\": 0.1347188949584961, \"Time in s\": 2.795782 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8810068649885584, \"F1\": 0.8583106267029972, \"Memory in Mb\": 0.1347188949584961, \"Time in s\": 2.94896 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882091212458287, \"F1\": 0.8590425531914893, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.105941 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8831168831168831, \"F1\": 0.8611825192802056, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.266898 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880927291886196, \"F1\": 0.8599752168525404, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.431805 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819301848049281, \"F1\": 0.8609431680773881, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.600575 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828828828828829, \"F1\": 0.8621908127208481, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.773271 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": 
\"Phishing\", \"Accuracy\": 0.8818359375, \"F1\": 0.8613974799541809, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.949737 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8836987607244995, \"F1\": 0.8641425389755011, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.130098 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845437616387337, \"F1\": 0.8658008658008659, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.314272 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844404003639672, \"F1\": 0.8656084656084656, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.502263 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816725978647687, \"F1\": 0.8630278063851698, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.694139 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8807658833768495, \"F1\": 0.8614762386248735, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.889705 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.879045996592845, \"F1\": 0.8594059405940594, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.0886830000000005 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8807339449541285, \"F1\": 0.8610301263362489, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.292268000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880718954248366, \"F1\": 0.8609523809523809, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.499479000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8799039231385108, \"F1\": 0.8605947955390334, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.711295000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 0.17714 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 0.53242 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 1.05881 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 1.755561 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 2.619724 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, 
\"Time in s\": 3.65225 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 4.852976 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995401694803916, \"F1\": 0.5882352941176471, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 6.232766 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992409202382344, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 7.809704 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993168322034788, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 9.583916 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999378941333843, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 11.564329 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994306984891612, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 13.76196 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994744926833212, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 16.175204 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999512030329192, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 18.806955 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999544562780269, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 21.653103 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995730285413998, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 24.713929 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999598145285935, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 27.986042 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999620471199603, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 31.466943 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996404469520964, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 35.15479 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996584250768544, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 39.051373 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996746909564086, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 43.155234 }, { \"step\": 41866, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996894780843186, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 47.461241 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997029793456408, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 51.972515 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997153554772176, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 56.689378000000005 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847017278344, \"F1\": 0.4827586206896552, \"Memory in Mb\": 0.0443153381347656, \"Time in s\": 61.613517 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996766174181944, \"F1\": 0.4666666666666667, \"Memory in Mb\": 0.0443153381347656, \"Time in s\": 66.740207 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996691319579604, \"F1\": 0.5142857142857142, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 72.066529 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996809488955202, \"F1\": 0.5142857142857142, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 77.596723 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999673830319284, \"F1\": 0.5, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 83.324048 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995270542486292, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 89.177762 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995423108218064, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 95.152342 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995566138435013, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 101.246537 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995222777795472, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 107.458379 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995363286502528, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 113.786816 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549576602006, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 120.231179 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562088545696, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 126.79213 }, { \"step\": 
70411, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995739241585002, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 133.468631 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995851368357004, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 140.25926299999998 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995823003126012, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 147.163887 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995927429419724, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 154.18114 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996026761682604, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 161.310049 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996121363778544, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 168.551426 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996211565723224, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 175.901071 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996178237450885, \"F1\": 0.4482758620689655, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 183.360737 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996146390452392, \"F1\": 0.4406779661016949, \"Memory in Mb\": 0.0940589904785156, \"Time in s\": 190.930796 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996230165530005, \"F1\": 0.4406779661016949, \"Memory in Mb\": 0.0940589904785156, \"Time in s\": 198.60706 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996198568872988, \"F1\": 0.4333333333333333, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 206.389291 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999616828875776, \"F1\": 0.4262295081967213, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 214.276883 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996139244578855, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 222.269881 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996216460498796, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 230.368202 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5714285714285714, \"F1\": 0.628099173553719, \"Memory in Mb\": 
0.0257539749145507, \"Time in s\": 0.030279 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5592417061611374, \"F1\": 0.5903083700440529, \"Memory in Mb\": 0.0258378982543945, \"Time in s\": 0.079572 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5615141955835962, \"F1\": 0.5947521865889213, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.147698 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5555555555555556, \"F1\": 0.5822222222222222, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.234734 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.555765595463138, \"F1\": 0.5506692160611854, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.340374 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.5291181364392679, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.465898 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5708502024291497, \"F1\": 0.5167173252279634, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.6107940000000001 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5761511216056671, \"F1\": 0.510231923601637, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.774445 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.505, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.95753 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4953271028037382, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.15978 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.478688524590164, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.380797 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4807302231237322, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.620256 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.4661508704061895, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 1.881163 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.458029197080292, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.166507 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.4517241379310345, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.464497 }, { \"step\": 1696, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, \"F1\": 0.4459016393442623, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.7711840000000003 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6018878400888396, \"F1\": 0.445475638051044, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.0861490000000003 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.4470416362308254, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.4094810000000004 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.443526170798898, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.741125 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5960358659745163, \"F1\": 0.4427083333333333, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.081019 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5968539325842697, \"F1\": 0.4425108763206961, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.429484 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5975975975975976, \"F1\": 0.4423305588585017, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.786285 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5982765695527288, \"F1\": 0.4396107613050944, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 5.150978 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5973259929217459, \"F1\": 0.4398249452954048, \"Memory in Mb\": 0.0303611755371093, \"Time in s\": 5.5260560000000005 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5956964892412231, \"F1\": 0.4436363636363636, \"Memory in Mb\": 0.0572662353515625, \"Time in s\": 5.913093000000001 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4497512437810945, \"Memory in Mb\": 0.0575103759765625, \"Time in s\": 6.312855000000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.600139811254806, \"F1\": 0.4536771728748806, \"Memory in Mb\": 0.0577545166015625, \"Time in s\": 6.725712000000001 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5979103471520054, \"F1\": 0.4525011473152822, \"Memory in Mb\": 0.057861328125, \"Time in s\": 7.151638 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971363488447771, \"F1\": 0.4497777777777778, \"Memory in Mb\": 0.0579833984375, \"Time in s\": 7.588719 }, { \"step\": 3180, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008178672538534, \"F1\": 0.4499349804941482, \"Memory in Mb\": 0.05804443359375, \"Time in s\": 8.037238 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6024353120243531, \"F1\": 0.4470787468247248, \"Memory in Mb\": 0.05816650390625, \"Time in s\": 8.49667 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6012975523444412, \"F1\": 0.444991789819376, \"Memory in Mb\": 0.0582275390625, \"Time in s\": 8.967725 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.603946239633972, \"F1\": 0.4431041415359871, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 9.449361 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.607826810990841, \"F1\": 0.4452296819787986, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 9.942275 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6071717444055001, \"F1\": 0.441976254308694, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 10.446028 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6062909567496724, \"F1\": 0.4378742514970059, \"Memory in Mb\": 0.058349609375, \"Time in s\": 10.961132 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.606988013261923, \"F1\": 0.4353242946134115, \"Memory in Mb\": 0.05841064453125, \"Time in s\": 11.487205 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6088899925502855, \"F1\": 0.4360902255639098, \"Memory in Mb\": 0.05841064453125, \"Time in s\": 12.024686 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6082748608758771, \"F1\": 0.4341139461726669, \"Memory in Mb\": 0.0584716796875, \"Time in s\": 12.573182999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6105213493748526, \"F1\": 0.4370951244459598, \"Memory in Mb\": 0.0584716796875, \"Time in s\": 13.132692999999998 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6119677790563867, \"F1\": 0.4372496662216288, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 13.703478 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.614243990114581, \"F1\": 0.4387054593004249, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 14.285281 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6126837831906956, \"F1\": 0.4355612408058842, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 14.878356 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.613339052112374, \"F1\": 0.4360337816703159, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 15.482258 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6148039421262319, \"F1\": 0.4352905010759299, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 16.097208 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157948717948718, \"F1\": 0.4332829046898639, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 16.723433999999997 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6167436257779563, \"F1\": 0.434705359786793, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 17.360799999999998 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6158836249262827, \"F1\": 0.4313154831199068, \"Memory in Mb\": 0.0647354125976562, \"Time in s\": 18.009093999999997 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6160215674947044, \"F1\": 0.4296338672768878, \"Memory in Mb\": 0.0647964477539062, \"Time in s\": 18.668471 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6165314210228345, \"F1\": 0.4282498593134496, \"Memory in Mb\": 0.0624046325683593, \"Time in s\": 19.339784 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8386740331491712, \"F1\": 0.8370535714285713, \"Memory in Mb\": 0.1590566635131836, \"Time in s\": 0.369424 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8823854224185533, \"F1\": 0.857334226389819, \"Memory in Mb\": 0.2951574325561523, \"Time in s\": 1.082516 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8715495031284505, \"F1\": 0.8438478747203579, \"Memory in Mb\": 0.1301527023315429, \"Time in s\": 2.245646 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8755175269113994, \"F1\": 0.8480970023576963, \"Memory in Mb\": 0.2537450790405273, \"Time in s\": 3.683735 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.873923603444469, \"F1\": 0.8402797202797203, \"Memory in Mb\": 0.3769788742065429, \"Time in s\": 5.436553 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8680772769089237, \"F1\": 0.8326721120186699, \"Memory in Mb\": 0.4361524581909179, \"Time in s\": 7.563805 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8667402617883615, \"F1\": 0.8319076984284862, \"Memory in Mb\": 0.2912740707397461, \"Time in s\": 10.057711 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8665654753691182, \"F1\": 
0.8309144955411786, \"Memory in Mb\": 0.3158788681030273, \"Time in s\": 12.896512 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8588249724027965, \"F1\": 0.8328249818445898, \"Memory in Mb\": 0.3159399032592773, \"Time in s\": 16.103426000000002 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8594767634396733, \"F1\": 0.8384722750919934, \"Memory in Mb\": 0.3158178329467773, \"Time in s\": 19.644725 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8557952834922228, \"F1\": 0.8381938970836617, \"Memory in Mb\": 0.3156957626342773, \"Time in s\": 23.527363 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8595345414405299, \"F1\": 0.8445801526717558, \"Memory in Mb\": 0.3772764205932617, \"Time in s\": 27.740459 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8540375307803345, \"F1\": 0.8368605865046976, \"Memory in Mb\": 0.4951925277709961, \"Time in s\": 32.366043000000005 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8543719940077269, \"F1\": 0.8371970030850595, \"Memory in Mb\": 0.1922826766967773, \"Time in s\": 37.423083000000005 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8561336375009199, \"F1\": 0.8404472374112462, \"Memory in Mb\": 0.1966886520385742, \"Time in s\": 42.800608 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8560193170058641, \"F1\": 0.8406018483158941, \"Memory in Mb\": 0.1969938278198242, \"Time in s\": 48.503182 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8546198298811766, \"F1\": 0.8374119526541282, \"Memory in Mb\": 0.1967496871948242, \"Time in s\": 54.449200000000005 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8514748267615134, \"F1\": 0.8324339283243393, \"Memory in Mb\": 0.1668386459350586, \"Time in s\": 60.610803 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8503456689711265, \"F1\": 0.8286094477711246, \"Memory in Mb\": 0.1718664169311523, \"Time in s\": 66.962846 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8505436282355539, \"F1\": 0.8284555935639174, \"Memory in Mb\": 0.2014341354370117, \"Time in s\": 73.50726399999999 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8529829172141918, \"F1\": 0.8293992070753278, \"Memory in Mb\": 0.2609548568725586, \"Time in s\": 80.25162699999998 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8476744769454618, \"F1\": 0.8245289561900357, 
\"Memory in Mb\": 0.3200864791870117, \"Time in s\": 87.27384199999999 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8440274511685943, \"F1\": 0.8206203775251132, \"Memory in Mb\": 0.3286733627319336, \"Time in s\": 94.62519899999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8459734167318217, \"F1\": 0.8206693440428381, \"Memory in Mb\": 0.3269262313842773, \"Time in s\": 102.22022699999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8452470307739856, \"F1\": 0.8179693586081537, \"Memory in Mb\": 0.4455976486206054, \"Time in s\": 110.06165699999998 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8426236467841223, \"F1\": 0.813952321204517, \"Memory in Mb\": 0.3226041793823242, \"Time in s\": 118.13610299999998 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.83966313723887, \"F1\": 0.8094081057439986, \"Memory in Mb\": 0.3223371505737304, \"Time in s\": 126.42298599999998 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8335632908897387, \"F1\": 0.804101707498144, \"Memory in Mb\": 0.3226423263549804, \"Time in s\": 134.92385099999998 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.833859856126061, \"F1\": 0.8041634887164072, \"Memory in Mb\": 0.3227415084838867, \"Time in s\": 143.62308599999997 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8353508223260606, \"F1\": 0.8067872717067484, \"Memory in Mb\": 0.3344221115112304, \"Time in s\": 152.52188499999997 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.832366031689514, \"F1\": 0.8022679546409073, \"Memory in Mb\": 0.4427366256713867, \"Time in s\": 161.69157399999995 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298092511469076, \"F1\": 0.7967539957159334, \"Memory in Mb\": 0.4431562423706054, \"Time in s\": 171.13838399999997 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.828912599926414, \"F1\": 0.7954572719638501, \"Memory in Mb\": 0.4460401535034179, \"Time in s\": 180.848187 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8286855176443852, \"F1\": 0.7940682926829268, \"Memory in Mb\": 0.4524259567260742, \"Time in s\": 190.785177 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8270522564571573, \"F1\": 0.7921309984080055, \"Memory in Mb\": 0.3178968429565429, \"Time in s\": 200.976285 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8257550206959988, 
\"F1\": 0.7908123826701513, \"Memory in Mb\": 0.5092306137084961, \"Time in s\": 211.41806699999995 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8259001819754781, \"F1\": 0.7917201998572448, \"Memory in Mb\": 0.4504041671752929, \"Time in s\": 222.10332899999997 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8258634211520028, \"F1\": 0.7914564998086757, \"Memory in Mb\": 0.3306646347045898, \"Time in s\": 233.03381899999997 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8252625024764385, \"F1\": 0.7899285471248724, \"Memory in Mb\": 0.4912481307983398, \"Time in s\": 244.20613599999996 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8261541433262507, \"F1\": 0.7894103489771359, \"Memory in Mb\": 0.5119619369506836, \"Time in s\": 255.69628199999997 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8248754879526181, \"F1\": 0.7869936802121876, \"Memory in Mb\": 0.4187917709350586, \"Time in s\": 267.45490699999993 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8248403458516202, \"F1\": 0.7864398090294465, \"Memory in Mb\": 0.4483175277709961, \"Time in s\": 279.43877899999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8243447904099391, \"F1\": 0.7868689070919114, \"Memory in Mb\": 0.5077886581420898, \"Time in s\": 291.70049499999993 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8249504553094348, \"F1\": 0.7887758808572466, \"Memory in Mb\": 0.1471853256225586, \"Time in s\": 304.138659 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8262159974490422, \"F1\": 0.7919908399635948, \"Memory in Mb\": 0.2297697067260742, \"Time in s\": 316.729427 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8275224955008998, \"F1\": 0.794816168074903, \"Memory in Mb\": 0.2342596054077148, \"Time in s\": 329.475528 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8281393109602875, \"F1\": 0.7956550876801073, \"Memory in Mb\": 0.3305959701538086, \"Time in s\": 342.401664 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8284774760273185, \"F1\": 0.7958619557185473, \"Memory in Mb\": 0.3348875045776367, \"Time in s\": 355.508994 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8276530083571735, \"F1\": 0.7938902508014333, \"Memory in Mb\": 0.2839117050170898, \"Time in s\": 368.88314 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.8286717146073864, \"F1\": 0.795391632174211, \"Memory in Mb\": 0.3993253707885742, \"Time in s\": 382.503727 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.6428571428571429, \"Memory in Mb\": 0.075688362121582, \"Time in s\": 0.0191 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7346938775510203, \"Memory in Mb\": 0.075749397277832, \"Time in s\": 0.048719 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.075749397277832, \"Time in s\": 0.086072 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.8080808080808081, \"Memory in Mb\": 0.075810432434082, \"Time in s\": 0.130387 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8225806451612904, \"F1\": 0.819672131147541, \"Memory in Mb\": 0.075810432434082, \"Time in s\": 0.181491 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.825503355704698, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0758333206176757, \"Time in s\": 0.241937 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8242424242424242, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.309852 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8291457286432161, \"F1\": 0.8191489361702128, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.385335 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8155339805825242, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.468101 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.817391304347826, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.558214 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8174603174603176, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.656226 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8361204013377926, \"F1\": 0.8178438661710038, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.765253 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8425925925925926, \"F1\": 0.8197879858657244, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 0.883707 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8481375358166189, \"F1\": 0.822742474916388, \"Memory in Mb\": 
0.0759553909301757, \"Time in s\": 1.011557 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8227848101265823, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.148821 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8521303258145363, \"F1\": 0.8228228228228228, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.295386 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8537735849056604, \"F1\": 0.8208092485549133, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.4534280000000002 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8574610244988864, \"F1\": 0.8232044198895027, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.6208710000000002 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.8238341968911918, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.7976930000000002 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8557114228456913, \"F1\": 0.8260869565217391, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.9839770000000003 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8568702290076335, \"F1\": 0.823529411764706, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 2.179609 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561020036429873, \"F1\": 0.8240534521158129, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 2.3846100000000003 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8554006968641115, \"F1\": 0.8230277185501066, \"Memory in Mb\": 0.1161155700683593, \"Time in s\": 2.6084530000000004 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8547579298831386, \"F1\": 0.8176100628930818, \"Memory in Mb\": 0.1440086364746093, \"Time in s\": 2.84161 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8573717948717948, \"F1\": 0.8172484599589321, \"Memory in Mb\": 0.1442451477050781, \"Time in s\": 3.082865 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8597842835130971, \"F1\": 0.8233009708737864, \"Memory in Mb\": 0.1444129943847656, \"Time in s\": 3.332264 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590504451038575, \"F1\": 0.8263254113345521, \"Memory in Mb\": 0.1444740295410156, \"Time in s\": 3.58978 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640915593705293, \"F1\": 0.8306595365418894, \"Memory in Mb\": 
0.1445350646972656, \"Time in s\": 3.855481 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646408839779005, \"F1\": 0.8344594594594595, \"Memory in Mb\": 0.1445960998535156, \"Time in s\": 4.1295 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8664886515353805, \"F1\": 0.8371335504885993, \"Memory in Mb\": 0.1446571350097656, \"Time in s\": 4.411739 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8643410852713178, \"F1\": 0.8330683624801273, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 4.702108 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8635794743429287, \"F1\": 0.8340943683409437, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 5.000653 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8628640776699029, \"F1\": 0.8345534407027819, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 5.309277 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8645465253239105, \"F1\": 0.8364153627311521, \"Memory in Mb\": 0.1447410583496093, \"Time in s\": 5.626329 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8672768878718535, \"F1\": 0.838888888888889, \"Memory in Mb\": 0.1447410583496093, \"Time in s\": 5.951525 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8665183537263627, \"F1\": 0.8378378378378378, \"Memory in Mb\": 0.1448020935058593, \"Time in s\": 6.284981 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8668831168831169, \"F1\": 0.8400520156046815, \"Memory in Mb\": 0.1448020935058593, \"Time in s\": 6.628637 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8661749209694415, \"F1\": 0.8410513141426783, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 6.980485000000001 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86652977412731, \"F1\": 0.8414634146341464, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 7.342308000000001 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8638638638638638, \"F1\": 0.8392434988179669, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 7.713462000000001 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8623046875, \"F1\": 0.8377445339470656, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 8.093853000000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8636796949475691, \"F1\": 0.8402234636871508, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 
8.485573 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8649906890130353, \"F1\": 0.8429035752979415, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 8.889089 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671519563239308, \"F1\": 0.8456659619450316, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 9.305059 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8701067615658363, \"F1\": 0.8507157464212679, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 9.730762 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8720626631853786, \"F1\": 0.852852852852853, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 10.16685 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8713798977853492, \"F1\": 0.8521057786483839, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 10.613004 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.872393661384487, \"F1\": 0.8530259365994236, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 11.069492 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8733660130718954, \"F1\": 0.8541862652869238, \"Memory in Mb\": 0.1449851989746093, \"Time in s\": 11.536355 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8742994395516414, \"F1\": 0.8560953253895509, \"Memory in Mb\": 0.1449851989746093, \"Time in s\": 12.014925 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0239162445068359, \"Time in s\": 0.25062 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0239772796630859, \"Time in s\": 0.745512 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 1.4971299999999998 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 2.521262 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 3.817259 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240993499755859, \"Time in s\": 5.360746 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240993499755859, \"Time in s\": 7.021896 }, { \"step\": 
15224, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.050175666809082, \"Time in s\": 8.821636 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9991825294873292, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0453500747680664, \"Time in s\": 10.797386 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992642808345158, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0457162857055664, \"Time in s\": 12.952086 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993311675902924, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0458383560180664, \"Time in s\": 15.282579 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869060652508, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0455942153930664, \"Time in s\": 17.788716 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994340690435768, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0458383560180664, \"Time in s\": 20.470769 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474494200668, \"F1\": 0.5, \"Memory in Mb\": 0.0581369400024414, \"Time in s\": 23.327571 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999509529147982, \"F1\": 0.5, \"Memory in Mb\": 0.0581979751586914, \"Time in s\": 26.358996 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999540184583046, \"F1\": 0.5, \"Memory in Mb\": 0.0581979751586914, \"Time in s\": 29.565097 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995672333848532, \"F1\": 0.5, \"Memory in Mb\": 0.0583200454711914, \"Time in s\": 32.946387 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912766764956, \"F1\": 0.5, \"Memory in Mb\": 0.0583810806274414, \"Time in s\": 36.502912 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996127890253348, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 40.234574 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321500827662, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 44.142312 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496671838246, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 48.227315 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996655917831124, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 52.488216 }, 
{ \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996801316029976, \"F1\": 0.5, \"Memory in Mb\": 0.0585031509399414, \"Time in s\": 56.92422 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934597446958, \"F1\": 0.5, \"Memory in Mb\": 0.0586252212524414, \"Time in s\": 61.535205 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996636818430236, \"F1\": 0.4666666666666667, \"Memory in Mb\": 0.0678491592407226, \"Time in s\": 66.321748 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996564060068316, \"F1\": 0.4516129032258064, \"Memory in Mb\": 0.0678491592407226, \"Time in s\": 71.282934 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649669131958, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 76.42026 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999662181183492, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 81.732337 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999673830319284, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 87.220308 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995095377393192, \"F1\": 0.4166666666666666, \"Memory in Mb\": 0.1018075942993164, \"Time in s\": 92.923409 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994914564686738, \"F1\": 0.4000000000000001, \"Memory in Mb\": 0.1022958755493164, \"Time in s\": 98.859355 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995073487150012, \"F1\": 0.4000000000000001, \"Memory in Mb\": 0.1024179458618164, \"Time in s\": 105.024921 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745055575018, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1118478775024414, \"Time in s\": 111.430038 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999489961515278, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1119699478149414, \"Time in s\": 118.06567900000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995045342622064, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1120920181274414, \"Time in s\": 124.93220100000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995182974002657, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1121530532836914, \"Time in s\": 132.02968600000003 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9995313165743502, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1122140884399414, \"Time in s\": 139.35861700000004 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995436505192704, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1123361587524414, \"Time in s\": 146.91762900000003 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995418777622076, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.1123971939086914, \"Time in s\": 154.70829700000004 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999553330968615, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.1123971939086914, \"Time in s\": 162.73183200000005 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99953859167927, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1198663711547851, \"Time in s\": 170.98992200000006 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549577729121, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1202325820922851, \"Time in s\": 179.48569900000007 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995600527936648, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1203546524047851, \"Time in s\": 188.21505800000008 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995581087052584, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1204767227172851, \"Time in s\": 197.18003100000004 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99956792862648, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1204767227172851, \"Time in s\": 206.38234800000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995773215897278, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1205377578735351, \"Time in s\": 215.81852000000003 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995751341681576, \"F1\": 0.3870967741935484, \"Memory in Mb\": 0.1298608779907226, \"Time in s\": 225.49133900000004 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562090143744, \"F1\": 0.375, \"Memory in Mb\": 0.1298608779907226, \"Time in s\": 235.39794000000003 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995388542135856, \"F1\": 0.3582089552238806, \"Memory in Mb\": 0.1371393203735351, \"Time in s\": 245.54396400000005 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995480772262452, \"F1\": 0.3582089552238806, \"Memory in Mb\": 0.1372613906860351, \"Time in s\": 255.92452900000004 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.638095238095238, \"F1\": 0.5777777777777778, \"Memory in Mb\": 0.6364564895629883, \"Time in s\": 0.262298 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7488151658767772, \"F1\": 0.7103825136612022, \"Memory in Mb\": 1.0992326736450195, \"Time in s\": 0.748759 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7917981072555205, \"F1\": 0.7659574468085106, \"Memory in Mb\": 1.488083839416504, \"Time in s\": 1.463509 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8274231678486997, \"F1\": 0.8042895442359249, \"Memory in Mb\": 1.85091495513916, \"Time in s\": 2.4097 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.831758034026465, \"F1\": 0.802660753880266, \"Memory in Mb\": 2.4203081130981445, \"Time in s\": 3.615781 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8456692913385827, \"F1\": 0.8191881918819188, \"Memory in Mb\": 2.83583927154541, \"Time in s\": 5.093309 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8569500674763832, \"F1\": 0.8284789644012946, \"Memory in Mb\": 3.2995615005493164, \"Time in s\": 6.854744 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.859504132231405, \"F1\": 0.8326300984528833, \"Memory in Mb\": 3.4132471084594727, \"Time in s\": 8.933942 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.863588667366212, \"F1\": 0.8370927318295739, \"Memory in Mb\": 3.8167009353637695, \"Time in s\": 11.329061 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8715769593956563, \"F1\": 0.8454545454545456, \"Memory in Mb\": 4.3293962478637695, \"Time in s\": 14.065921 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8738197424892704, \"F1\": 0.8489208633093526, \"Memory in Mb\": 4.776837348937988, \"Time in s\": 17.153408 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8749016522423289, \"F1\": 0.8512628624883068, \"Memory in Mb\": 5.179757118225098, \"Time in s\": 20.492268 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8765432098765432, \"F1\": 0.8516579406631763, \"Memory in Mb\": 5.6712846755981445, \"Time in s\": 24.00032 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8813216453135536, \"F1\": 0.8580645161290322, \"Memory in Mb\": 6.118577003479004, \"Time in s\": 27.676707 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8779106356198867, \"F1\": 0.8554396423248881, \"Memory in Mb\": 
6.625298500061035, \"Time in s\": 31.525227 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.879646017699115, \"F1\": 0.8573426573426574, \"Memory in Mb\": 5.8858842849731445, \"Time in s\": 35.585591 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8800666296501943, \"F1\": 0.8590078328981724, \"Memory in Mb\": 6.279637336730957, \"Time in s\": 39.950222 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8778185631882538, \"F1\": 0.8578401464307503, \"Memory in Mb\": 6.042496681213379, \"Time in s\": 44.519385 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.877297565822156, \"F1\": 0.8584527220630374, \"Memory in Mb\": 6.480931282043457, \"Time in s\": 49.302272 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8801321378008494, \"F1\": 0.8631465517241379, \"Memory in Mb\": 6.779278755187988, \"Time in s\": 54.294834 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8791011235955056, \"F1\": 0.8621219887237315, \"Memory in Mb\": 7.120572090148926, \"Time in s\": 59.4987 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8781638781638782, \"F1\": 0.8611925708699902, \"Memory in Mb\": 7.709580421447754, \"Time in s\": 64.935467 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8789495281083299, \"F1\": 0.8618266978922717, \"Memory in Mb\": 8.127808570861816, \"Time in s\": 70.60280800000001 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8788832088084939, \"F1\": 0.8624999999999999, \"Memory in Mb\": 8.531121253967285, \"Time in s\": 76.48792700000001 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8803322008305021, \"F1\": 0.8645877829987185, \"Memory in Mb\": 8.842900276184082, \"Time in s\": 82.60938300000001 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8827586206896552, \"F1\": 0.8671328671328672, \"Memory in Mb\": 9.237275123596191, \"Time in s\": 88.95420100000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.88325760223698, \"F1\": 0.8674603174603175, \"Memory in Mb\": 9.480591773986816, \"Time in s\": 95.534715 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8843950117964273, \"F1\": 0.8681276432141485, \"Memory in Mb\": 9.87939167022705, \"Time in s\": 102.339589 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8848031239830785, \"F1\": 0.8690828402366864, \"Memory in Mb\": 10.33882999420166, \"Time in s\": 109.378768 }, { 
\"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8867568417741428, \"F1\": 0.8707824838478105, \"Memory in Mb\": 10.641068458557127, \"Time in s\": 116.664225 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8861491628614916, \"F1\": 0.8697771587743733, \"Memory in Mb\": 10.188206672668455, \"Time in s\": 124.173867 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8867590681214981, \"F1\": 0.8712273641851107, \"Memory in Mb\": 10.46657657623291, \"Time in s\": 131.921991 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8881898770374607, \"F1\": 0.8723473718576559, \"Memory in Mb\": 9.652070045471191, \"Time in s\": 139.895805 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889814043852345, \"F1\": 0.8726925525143221, \"Memory in Mb\": 8.887116432189941, \"Time in s\": 148.067873 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889188460501483, \"F1\": 0.873152709359606, \"Memory in Mb\": 9.26492404937744, \"Time in s\": 156.441655 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.89043250327654, \"F1\": 0.875, \"Memory in Mb\": 9.5267915725708, \"Time in s\": 165.011312 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8888038765621015, \"F1\": 0.8728862973760932, \"Memory in Mb\": 9.89724826812744, \"Time in s\": 173.777209 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8872609883287808, \"F1\": 0.8710227272727272, \"Memory in Mb\": 10.25338077545166, \"Time in s\": 182.74826 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8877328816840068, \"F1\": 0.8716104039845047, \"Memory in Mb\": 10.579400062561035, \"Time in s\": 191.919884 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8886529841943854, \"F1\": 0.8727762803234501, \"Memory in Mb\": 10.81624698638916, \"Time in s\": 201.289592 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8895281933256617, \"F1\": 0.8738833420914347, \"Memory in Mb\": 10.986077308654783, \"Time in s\": 210.860402 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8890137047854415, \"F1\": 0.8732032854209446, \"Memory in Mb\": 11.276833534240724, \"Time in s\": 220.647955 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8887425938117183, \"F1\": 0.8734082397003745, \"Memory in Mb\": 11.626667976379396, \"Time in s\": 230.650874 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Adaptive Random 
Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889127171348917, \"F1\": 0.8740272373540856, \"Memory in Mb\": 12.038758277893066, \"Time in s\": 240.866021 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.887188089746278, \"F1\": 0.871843735111958, \"Memory in Mb\": 12.429780006408691, \"Time in s\": 251.436375 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875897435897436, \"F1\": 0.8720224194301728, \"Memory in Mb\": 12.7014741897583, \"Time in s\": 262.227068 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8883758281469585, \"F1\": 0.8732907930720145, \"Memory in Mb\": 12.836377143859863, \"Time in s\": 273.239924 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875565166109691, \"F1\": 0.8722644037516749, \"Memory in Mb\": 13.209172248840332, \"Time in s\": 284.483402 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875409204698633, \"F1\": 0.8722100656455142, \"Memory in Mb\": 13.58340549468994, \"Time in s\": 295.957708 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.886959803736554, \"F1\": 0.8715419257988419, \"Memory in Mb\": 13.845444679260254, \"Time in s\": 307.673049 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8674033149171271, \"F1\": 0.8669623059866962, \"Memory in Mb\": 3.0924072265625, \"Time in s\": 1.44234 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.895085588072888, \"F1\": 0.8724832214765101, \"Memory in Mb\": 4.682834625244141, \"Time in s\": 4.396686 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8844313581155686, \"F1\": 0.8575317604355717, \"Memory in Mb\": 7.755058288574219, \"Time in s\": 9.151923 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8923544024289263, \"F1\": 0.8677069199457259, \"Memory in Mb\": 8.665351867675781, \"Time in s\": 15.539979 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8940163391477147, \"F1\": 0.8633257403189066, \"Memory in Mb\": 11.046634674072266, \"Time in s\": 23.437977 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8868445262189513, \"F1\": 0.8541617263457434, \"Memory in Mb\": 15.550697326660156, \"Time in s\": 33.146859 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8856647216527361, \"F1\": 0.8543884314119301, \"Memory in Mb\": 17.033367156982422, \"Time in s\": 44.650345 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8840899682627295, \"F1\": 
0.8505869797225187, \"Memory in Mb\": 21.311607360839844, \"Time in s\": 57.88122 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8850729792714338, \"F1\": 0.8592881814086198, \"Memory in Mb\": 20.813556671142575, \"Time in s\": 72.837822 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8881775030356551, \"F1\": 0.866657891272871, \"Memory in Mb\": 22.198707580566406, \"Time in s\": 89.470796 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8867034621174109, \"F1\": 0.8678450193140582, \"Memory in Mb\": 25.867393493652344, \"Time in s\": 108.078283 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887866801582192, \"F1\": 0.8723202027669237, \"Memory in Mb\": 24.029823303222656, \"Time in s\": 128.509921 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8868981913899975, \"F1\": 0.8695141065830722, \"Memory in Mb\": 23.736316680908203, \"Time in s\": 150.696171 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8834660569265946, \"F1\": 0.8662201303403331, \"Memory in Mb\": 17.480976104736328, \"Time in s\": 174.61452699999998 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8837294870851424, \"F1\": 0.8684648684648686, \"Memory in Mb\": 17.212547302246094, \"Time in s\": 200.151461 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826491893756467, \"F1\": 0.8678013522965726, \"Memory in Mb\": 16.54094696044922, \"Time in s\": 227.262884 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836439192260243, \"F1\": 0.8679439941046426, \"Memory in Mb\": 17.876373291015625, \"Time in s\": 255.95351 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8814006254982523, \"F1\": 0.8648119670068503, \"Memory in Mb\": 12.619304656982422, \"Time in s\": 286.224579 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8818334979376053, \"F1\": 0.8629380053908356, \"Memory in Mb\": 9.807907104492188, \"Time in s\": 317.969431 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8821126993763453, \"F1\": 0.8632872503840245, \"Memory in Mb\": 8.873615264892578, \"Time in s\": 351.00359 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.883784494086728, \"F1\": 0.8636279528773206, \"Memory in Mb\": 11.29254150390625, \"Time in s\": 385.315707 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8832973759470172, \"F1\": 0.8638810861423221, \"Memory in Mb\": 13.30521011352539, \"Time in s\": 
421.189813 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826126601718097, \"F1\": 0.8630919064144185, \"Memory in Mb\": 13.75485610961914, \"Time in s\": 458.867106 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8820769902957274, \"F1\": 0.8605005440696408, \"Memory in Mb\": 10.461296081542969, \"Time in s\": 498.082692 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8810985032451764, \"F1\": 0.8581959875730609, \"Memory in Mb\": 10.731792449951172, \"Time in s\": 538.811106 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.880025472298875, \"F1\": 0.8563732465948364, \"Memory in Mb\": 8.147632598876953, \"Time in s\": 581.039793 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8793998610032296, \"F1\": 0.8547799547110366, \"Memory in Mb\": 10.927783966064451, \"Time in s\": 624.8074220000001 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8777545630149407, \"F1\": 0.8528029619784497, \"Memory in Mb\": 11.403583526611328, \"Time in s\": 670.1639380000001 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8783922658242302, \"F1\": 0.8533663775299463, \"Memory in Mb\": 14.942352294921877, \"Time in s\": 717.1095470000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8798704882446006, \"F1\": 0.855843525100446, \"Memory in Mb\": 14.17806625366211, \"Time in s\": 765.6544720000002 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780843866832829, \"F1\": 0.8531732418524872, \"Memory in Mb\": 12.432361602783203, \"Time in s\": 815.7617920000001 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780656065675555, \"F1\": 0.852406997620141, \"Memory in Mb\": 10.199352264404297, \"Time in s\": 867.3023770000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784158945713617, \"F1\": 0.8527684393859614, \"Memory in Mb\": 13.818798065185549, \"Time in s\": 920.296853 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8787455767295393, \"F1\": 0.8524356998933271, \"Memory in Mb\": 16.783336639404297, \"Time in s\": 975.029218 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8776057270806396, \"F1\": 0.8507250278856878, \"Memory in Mb\": 18.14492416381836, \"Time in s\": 1031.673342 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8769277939598344, \"F1\": 0.8501567866208751, \"Memory in Mb\": 18.455127716064453, \"Time in s\": 1090.274581 }, { \"step\": 
33522, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8766743235583664, \"F1\": 0.8503366881471291, \"Memory in Mb\": 21.331356048583984, \"Time in s\": 1150.683688 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.875940395619717, \"F1\": 0.8493102353314751, \"Memory in Mb\": 20.51153945922852, \"Time in s\": 1212.996992 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8751591996150907, \"F1\": 0.8476285882068464, \"Memory in Mb\": 17.70761489868164, \"Time in s\": 1277.0202020000002 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8746930102927785, \"F1\": 0.8461772975170219, \"Memory in Mb\": 18.968151092529297, \"Time in s\": 1342.643234 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8737649750975905, \"F1\": 0.8444930852651478, \"Memory in Mb\": 21.1762809753418, \"Time in s\": 1410.0019350000002 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.874063756537279, \"F1\": 0.8444256866437246, \"Memory in Mb\": 13.57645034790039, \"Time in s\": 1479.1416360000003 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8743999794645378, \"F1\": 0.8453001991842929, \"Memory in Mb\": 13.581947326660156, \"Time in s\": 1549.8962470000004 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8743446303589795, \"F1\": 0.8465990873732889, \"Memory in Mb\": 12.123741149902344, \"Time in s\": 1622.0853150000005 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8747823100885477, \"F1\": 0.8484848484848485, \"Memory in Mb\": 12.675861358642578, \"Time in s\": 1695.6369460000003 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8752969406118776, \"F1\": 0.850217598063233, \"Memory in Mb\": 15.801628112792969, \"Time in s\": 1770.5477020000003 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.87569573283859, \"F1\": 0.8509895554742266, \"Memory in Mb\": 16.52715301513672, \"Time in s\": 1847.0334210000003 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.87568698691563, \"F1\": 0.8510087090728695, \"Memory in Mb\": 18.189510345458984, \"Time in s\": 1925.197809 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8757236501250197, \"F1\": 0.8505242623750305, \"Memory in Mb\": 18.102394104003903, \"Time in s\": 2005.1269950000003 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8766197929314112, \"F1\": 0.8519587847323391, \"Memory in Mb\": 20.35542678833008, \"Time in s\": 2086.7182620000003 }, { 
\"step\": 25, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.625, \"F1\": 0.7096774193548387, \"Memory in Mb\": 0.4235925674438476, \"Time in s\": 0.126705 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7450980392156864, \"Memory in Mb\": 0.6303834915161133, \"Time in s\": 0.333958 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.8403291702270508, \"Time in s\": 0.61983 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.797979797979798, \"F1\": 0.8039215686274509, \"Memory in Mb\": 0.9226388931274414, \"Time in s\": 0.981624 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7903225806451613, \"F1\": 0.7968749999999999, \"Memory in Mb\": 1.0709314346313477, \"Time in s\": 1.421284 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8120805369127517, \"F1\": 0.8227848101265823, \"Memory in Mb\": 1.1753358840942385, \"Time in s\": 1.940335 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8372093023255814, \"Memory in Mb\": 1.2494592666625977, \"Time in s\": 2.543018 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8442211055276382, \"F1\": 0.8426395939086295, \"Memory in Mb\": 1.3681573867797852, \"Time in s\": 3.230103 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8465116279069769, \"Memory in Mb\": 1.4882898330688477, \"Time in s\": 4.000846999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8433734939759037, \"F1\": 0.8354430379746836, \"Memory in Mb\": 1.6624422073364258, \"Time in s\": 4.857951999999999 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.843065693430657, \"F1\": 0.833976833976834, \"Memory in Mb\": 1.7254152297973633, \"Time in s\": 5.796970999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8494983277591973, \"F1\": 0.8375451263537907, \"Memory in Mb\": 1.8179521560668943, \"Time in s\": 6.820338 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8580246913580247, \"F1\": 0.8424657534246577, \"Memory in Mb\": 1.875351905822754, \"Time in s\": 7.930464 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8595988538681948, \"F1\": 0.8414239482200646, \"Memory in Mb\": 2.064530372619629, \"Time in s\": 9.128265 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Adaptive 
Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8582887700534759, \"F1\": 0.8379204892966361, \"Memory in Mb\": 2.210324287414551, \"Time in s\": 10.415761 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646616541353384, \"F1\": 0.8439306358381503, \"Memory in Mb\": 2.3119516372680664, \"Time in s\": 11.792425 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.8435754189944134, \"Memory in Mb\": 2.393784523010254, \"Time in s\": 13.261163 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.8502673796791443, \"Memory in Mb\": 2.504483222961426, \"Time in s\": 14.820437 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8550000000000001, \"Memory in Mb\": 2.601761817932129, \"Time in s\": 16.462295 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8797595190380761, \"F1\": 0.8598130841121494, \"Memory in Mb\": 2.5846261978149414, \"Time in s\": 18.198054000000003 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816793893129771, \"F1\": 0.859090909090909, \"Memory in Mb\": 2.742630958557129, \"Time in s\": 20.024147000000003 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8779599271402551, \"F1\": 0.855291576673866, \"Memory in Mb\": 2.8854761123657227, \"Time in s\": 21.959689000000004 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8763066202090593, \"F1\": 0.8530020703933747, \"Memory in Mb\": 3.0752573013305664, \"Time in s\": 24.00593300000001 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8797996661101837, \"F1\": 0.8548387096774194, \"Memory in Mb\": 3.1360864639282227, \"Time in s\": 26.16192000000001 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8830128205128205, \"F1\": 0.8554455445544554, \"Memory in Mb\": 3.285130500793457, \"Time in s\": 28.42139000000001 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8859784283513097, \"F1\": 0.8609022556390977, \"Memory in Mb\": 3.3397645950317383, \"Time in s\": 30.794885000000008 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887240356083086, \"F1\": 0.8672566371681416, \"Memory in Mb\": 3.5764551162719727, \"Time in s\": 33.284549000000005 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8927038626609443, \"F1\": 0.8704663212435233, \"Memory in Mb\": 3.464848518371582, \"Time in s\": 35.882938 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Adaptive Random 
Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893646408839779, \"F1\": 0.8735632183908045, \"Memory in Mb\": 3.70070743560791, \"Time in s\": 38.601152000000006 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958611481975968, \"F1\": 0.8765822784810127, \"Memory in Mb\": 3.883671760559082, \"Time in s\": 41.43692500000001 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896640826873385, \"F1\": 0.8769230769230768, \"Memory in Mb\": 4.02083683013916, \"Time in s\": 44.39908700000001 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8948685857321652, \"F1\": 0.8761061946902655, \"Memory in Mb\": 4.127713203430176, \"Time in s\": 47.483821000000006 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8944174757281553, \"F1\": 0.8765957446808511, \"Memory in Mb\": 4.256714820861816, \"Time in s\": 50.69677800000001 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8963486454652533, \"F1\": 0.8784530386740332, \"Memory in Mb\": 4.299836158752441, \"Time in s\": 54.03081400000001 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8993135011441648, \"F1\": 0.8814016172506738, \"Memory in Mb\": 4.410748481750488, \"Time in s\": 57.488212 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8987764182424917, \"F1\": 0.8804204993429698, \"Memory in Mb\": 4.566498756408691, \"Time in s\": 61.07896100000001 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9015151515151516, \"F1\": 0.8846641318124209, \"Memory in Mb\": 4.655289649963379, \"Time in s\": 64.810524 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9030558482613276, \"F1\": 0.8878048780487805, \"Memory in Mb\": 4.339470863342285, \"Time in s\": 68.683627 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9034907597535934, \"F1\": 0.8880952380952382, \"Memory in Mb\": 4.029709815979004, \"Time in s\": 72.689378 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9029029029029028, \"F1\": 0.8876013904982619, \"Memory in Mb\": 3.6762208938598633, \"Time in s\": 76.821914 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9013671875, \"F1\": 0.8861330326944759, \"Memory in Mb\": 3.842530250549317, \"Time in s\": 81.06784200000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9027645376549094, \"F1\": 0.8881578947368421, \"Memory in Mb\": 3.957364082336426, \"Time in s\": 85.43608100000002 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"Phishing\", \"Accuracy\": 0.9031657355679702, \"F1\": 0.8893617021276596, \"Memory in Mb\": 4.039715766906738, \"Time in s\": 89.92310000000002 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9044585987261148, \"F1\": 0.8911917098445594, \"Memory in Mb\": 4.059922218322754, \"Time in s\": 94.514674 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9065836298932384, \"F1\": 0.8946840521564694, \"Memory in Mb\": 4.122437477111816, \"Time in s\": 99.180878 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9077458659704092, \"F1\": 0.8958742632612966, \"Memory in Mb\": 4.38341236114502, \"Time in s\": 103.92086 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9063032367972744, \"F1\": 0.8940269749518305, \"Memory in Mb\": 3.936264991760254, \"Time in s\": 108.734067 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9074228523769808, \"F1\": 0.8949858088930936, \"Memory in Mb\": 3.502232551574707, \"Time in s\": 113.618405 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9084967320261438, \"F1\": 0.8961038961038962, \"Memory in Mb\": 3.7125635147094727, \"Time in s\": 118.566747 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9087269815852682, \"F1\": 0.8969258589511755, \"Memory in Mb\": 3.826443672180176, \"Time in s\": 123.577759 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1726446151733398, \"Time in s\": 2.131651 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1738653182983398, \"Time in s\": 5.506803 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 10.14235 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 15.888209 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 22.677279 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1763067245483398, \"Time in s\": 30.407676 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1763067245483398, \"Time in s\": 39.088803 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9998686198515404, \"F1\": 0.9, \"Memory in Mb\": 0.3923320770263672, \"Time in s\": 48.816854 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832184981898, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4169597625732422, \"Time in s\": 59.770396 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998948972620736, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4169597625732422, \"Time in s\": 71.94360999999999 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904452512899, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 85.33762899999999 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124151521788, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 99.94903 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999191527205108, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 115.777799 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249277429526, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4445209503173828, \"Time in s\": 132.832942 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999929932735426, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 151.111146 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343120832924, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 170.611749 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999381761978362, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 191.335277 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416109537852, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4561443328857422, \"Time in s\": 213.283336 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999446841464764, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4642963409423828, \"Time in s\": 236.451824 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474500118236, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4643535614013672, \"Time in s\": 260.85276600000003 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999949952454832, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4643535614013672, \"Time in s\": 286.481847 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522273975876, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, 
\"Time in s\": 313.33932500000003 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999954304514714, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, \"Time in s\": 341.422365 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562085349566, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, \"Time in s\": 370.73064 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999957960230378, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4830074310302734, \"Time in s\": 401.265116 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595771772742, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.491189956665039, \"Time in s\": 433.028922 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999221486959906, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5249767303466797, \"Time in s\": 466.02568 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249291518872, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5249767303466797, \"Time in s\": 500.253815 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999275178487298, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5554332733154297, \"Time in s\": 535.714615 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997547688696596, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8414325714111328, \"Time in s\": 572.520543 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762679685381, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8605022430419922, \"Time in s\": 610.6533019999999 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997700960670006, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8891468048095703, \"Time in s\": 650.109621 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997452148157584, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.0364971160888672, \"Time in s\": 690.9092899999999 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999752708613468, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.061361312866211, \"Time in s\": 733.0436639999999 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997597741877364, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.0615253448486328, \"Time in s\": 776.5083099999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664472243712, \"F1\": 0.68, \"Memory in Mb\": 1.1361942291259766, \"Time in s\": 821.3091039999999 }, { \"step\": 70411, \"track\": 
\"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997727595512002, \"F1\": 0.68, \"Memory in Mb\": 1.136308670043945, \"Time in s\": 867.450083 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997787396457068, \"F1\": 0.68, \"Memory in Mb\": 1.1362667083740234, \"Time in s\": 914.92252 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997844130645684, \"F1\": 0.68, \"Memory in Mb\": 1.1445026397705078, \"Time in s\": 963.73181 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99978980280876, \"F1\": 0.68, \"Memory in Mb\": 1.144460678100586, \"Time in s\": 1013.878015 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997949296352312, \"F1\": 0.68, \"Memory in Mb\": 1.1445598602294922, \"Time in s\": 1065.363214 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997998123240538, \"F1\": 0.68, \"Memory in Mb\": 1.154123306274414, \"Time in s\": 1118.183887 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998044679082956, \"F1\": 0.68, \"Memory in Mb\": 1.1541500091552734, \"Time in s\": 1172.339801 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998089118725442, \"F1\": 0.68, \"Memory in Mb\": 1.1554203033447266, \"Time in s\": 1227.8271129999998 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998131583249644, \"F1\": 0.68, \"Memory in Mb\": 1.1554012298583984, \"Time in s\": 1284.648826 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998172201469092, \"F1\": 0.68, \"Memory in Mb\": 1.1554012298583984, \"Time in s\": 1342.8019989999998 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999798747763864, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.2423763275146484, \"Time in s\": 1402.2973039999997 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029405646848, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.2579975128173828, \"Time in s\": 1463.1308069999998 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998069622289428, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.3168392181396484, \"Time in s\": 1525.3166049999998 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998108230249398, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.3265628814697266, \"Time in s\": 1588.862311 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5238095238095238, \"F1\": 0.4186046511627906, \"Memory in Mb\": 
0.2278289794921875, \"Time in s\": 0.437833 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5308056872037915, \"F1\": 0.4530386740331491, \"Memory in Mb\": 0.5808591842651367, \"Time in s\": 1.338953 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6025236593059937, \"F1\": 0.5467625899280575, \"Memory in Mb\": 1.0978193283081057, \"Time in s\": 2.727335 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6690307328605201, \"F1\": 0.6236559139784946, \"Memory in Mb\": 1.4776067733764648, \"Time in s\": 4.602884 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7069943289224953, \"F1\": 0.6547884187082404, \"Memory in Mb\": 1.862696647644043, \"Time in s\": 7.038838 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7401574803149606, \"F1\": 0.6983546617915906, \"Memory in Mb\": 2.7163190841674805, \"Time in s\": 10.152007 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7624831309041835, \"F1\": 0.7188498402555912, \"Memory in Mb\": 3.166998863220215, \"Time in s\": 14.047877000000002 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7792207792207793, \"F1\": 0.7399165507649514, \"Memory in Mb\": 3.2281599044799805, \"Time in s\": 18.677316 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7911857292759706, \"F1\": 0.754017305315204, \"Memory in Mb\": 2.101862907409668, \"Time in s\": 23.96263 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8035882908404155, \"F1\": 0.7657657657657657, \"Memory in Mb\": 1.8145971298217771, \"Time in s\": 29.783676 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8077253218884121, \"F1\": 0.7723577235772358, \"Memory in Mb\": 2.202631950378418, \"Time in s\": 35.938969 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8127458693941778, \"F1\": 0.7800369685767098, \"Memory in Mb\": 2.1398725509643555, \"Time in s\": 42.34268 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8191721132897604, \"F1\": 0.7855297157622738, \"Memory in Mb\": 2.487322807312012, \"Time in s\": 48.992986 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8240053944706676, \"F1\": 0.7916999201915402, \"Memory in Mb\": 2.964076042175293, \"Time in s\": 55.892559000000006 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8244178728760226, \"F1\": 0.7934863064396744, \"Memory in Mb\": 3.359782218933105, \"Time in s\": 
63.05187300000001 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8283185840707965, \"F1\": 0.797776233495483, \"Memory in Mb\": 3.637175559997559, \"Time in s\": 70.47404700000001 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8306496390893948, \"F1\": 0.802588996763754, \"Memory in Mb\": 4.031474113464356, \"Time in s\": 78.15954100000002 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8311484006292607, \"F1\": 0.8048484848484848, \"Memory in Mb\": 4.422553062438965, \"Time in s\": 86.11403800000002 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310978638847492, \"F1\": 0.806378132118451, \"Memory in Mb\": 4.790541648864746, \"Time in s\": 94.35073800000002 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8343558282208589, \"F1\": 0.8119978575254418, \"Memory in Mb\": 4.553057670593262, \"Time in s\": 102.86313600000004 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8350561797752809, \"F1\": 0.8126595201633486, \"Memory in Mb\": 4.935397148132324, \"Time in s\": 111.64595000000004 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8361218361218361, \"F1\": 0.8140214216163584, \"Memory in Mb\": 5.269236564636231, \"Time in s\": 120.70485400000004 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8379154698399671, \"F1\": 0.8156789547363509, \"Memory in Mb\": 5.532515525817871, \"Time in s\": 130.039733 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8391663389697208, \"F1\": 0.8178173719376391, \"Memory in Mb\": 5.79874324798584, \"Time in s\": 139.65537500000002 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.840317100792752, \"F1\": 0.81976991904559, \"Memory in Mb\": 5.978323936462402, \"Time in s\": 149.55427200000003 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8442831215970962, \"F1\": 0.8242523555919706, \"Memory in Mb\": 6.180487632751465, \"Time in s\": 159.73621300000002 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.846906675987417, \"F1\": 0.826603325415677, \"Memory in Mb\": 6.367312431335449, \"Time in s\": 170.195916 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490057296932929, \"F1\": 0.828352490421456, \"Memory in Mb\": 6.73636531829834, \"Time in s\": 180.938789 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8503091441588024, \"F1\": 0.8303834808259588, \"Memory in Mb\": 
6.987029075622559, \"Time in s\": 191.964907 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8530984586347908, \"F1\": 0.832796276405299, \"Memory in Mb\": 7.20963191986084, \"Time in s\": 203.271668 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8535768645357686, \"F1\": 0.8329281000347343, \"Memory in Mb\": 7.467520713806152, \"Time in s\": 214.858839 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8554998525508699, \"F1\": 0.8360107095046854, \"Memory in Mb\": 7.765227317810059, \"Time in s\": 226.734099 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8573062625107235, \"F1\": 0.8374063212772891, \"Memory in Mb\": 8.026595115661621, \"Time in s\": 238.898839 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8592839300582847, \"F1\": 0.8388941849380362, \"Memory in Mb\": 8.217352867126465, \"Time in s\": 251.354959 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8603397142086816, \"F1\": 0.8405172413793103, \"Memory in Mb\": 8.39356517791748, \"Time in s\": 264.100164 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8629095674967234, \"F1\": 0.843553694286569, \"Memory in Mb\": 8.53998851776123, \"Time in s\": 277.140513 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8630451415455241, \"F1\": 0.8433945756780402, \"Memory in Mb\": 8.81647777557373, \"Time in s\": 290.481842 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8629252545319096, \"F1\": 0.8431818181818181, \"Memory in Mb\": 9.181269645690918, \"Time in s\": 304.122133 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8637793370433099, \"F1\": 0.8442600276625173, \"Memory in Mb\": 9.408194541931152, \"Time in s\": 318.06952099999995 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8655343241330502, \"F1\": 0.8464439655172414, \"Memory in Mb\": 9.550837516784668, \"Time in s\": 332.31391899999994 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8672036823935558, \"F1\": 0.8484370895718414, \"Memory in Mb\": 9.808123588562012, \"Time in s\": 346.87205299999994 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8669961806335655, \"F1\": 0.8480492813141683, \"Memory in Mb\": 10.045561790466309, \"Time in s\": 361.755126 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8670177748518763, \"F1\": 0.848424212106053, \"Memory in Mb\": 10.332926750183104, 
\"Time in s\": 376.958924 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8674672957323611, \"F1\": 0.8494152046783626, \"Memory in Mb\": 10.668997764587402, \"Time in s\": 392.484642 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.866429020759069, \"F1\": 0.8480076354092102, \"Memory in Mb\": 11.001662254333496, \"Time in s\": 408.344032 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8666666666666667, \"F1\": 0.8477751756440282, \"Memory in Mb\": 11.155909538269045, \"Time in s\": 424.538143 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8678980124472997, \"F1\": 0.8495656149977137, \"Memory in Mb\": 11.33142375946045, \"Time in s\": 441.055719 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.868291724002359, \"F1\": 0.8499776085982983, \"Memory in Mb\": 11.580191612243652, \"Time in s\": 457.901482 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.868476795686501, \"F1\": 0.8501864443957009, \"Memory in Mb\": 11.828421592712402, \"Time in s\": 475.084789 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8690318928099642, \"F1\": 0.850816852966466, \"Memory in Mb\": 12.135478019714355, \"Time in s\": 492.613208 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8839779005524862, \"F1\": 0.8810872027180068, \"Memory in Mb\": 5.370833396911621, \"Time in s\": 5.916109 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9033683048039756, \"F1\": 0.8805460750853241, \"Memory in Mb\": 9.031210899353027, \"Time in s\": 15.079536999999998 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9006256900993743, \"F1\": 0.8771610555050046, \"Memory in Mb\": 13.98334789276123, \"Time in s\": 27.621333 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9014628760695556, \"F1\": 0.8784473953013278, \"Memory in Mb\": 18.162775993347168, \"Time in s\": 43.541312 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9008611172444247, \"F1\": 0.87226173541963, \"Memory in Mb\": 21.49211406707764, \"Time in s\": 63.022082 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8943882244710212, \"F1\": 0.863398381722989, \"Memory in Mb\": 25.749701499938965, \"Time in s\": 86.108323 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8941807285917048, \"F1\": 0.86403242147923, \"Memory in Mb\": 30.03483867645264, \"Time in s\": 112.908333 }, { \"step\": 7248, 
\"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8907133986477163, \"F1\": 0.8586723768736617, \"Memory in Mb\": 31.268176078796387, \"Time in s\": 143.442151 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8912056911566295, \"F1\": 0.8662343537927913, \"Memory in Mb\": 34.28826427459717, \"Time in s\": 177.844322 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8912683519152225, \"F1\": 0.8698639186154049, \"Memory in Mb\": 37.821166038513184, \"Time in s\": 216.191973 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8896136477671851, \"F1\": 0.8707706766917294, \"Memory in Mb\": 41.62779140472412, \"Time in s\": 258.820887 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8902584858798639, \"F1\": 0.8737966783031843, \"Memory in Mb\": 45.202799797058105, \"Time in s\": 305.617005 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8899549970281057, \"F1\": 0.873015873015873, \"Memory in Mb\": 44.63830471038818, \"Time in s\": 356.880829 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842545139162659, \"F1\": 0.8672934369915024, \"Memory in Mb\": 49.475626945495605, \"Time in s\": 412.597623 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.884538965339613, \"F1\": 0.8694566935685165, \"Memory in Mb\": 52.02482509613037, \"Time in s\": 472.748527 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854777509486029, \"F1\": 0.871138022046266, \"Memory in Mb\": 51.378371238708496, \"Time in s\": 537.275724 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8870203233556263, \"F1\": 0.8725461470846764, \"Memory in Mb\": 45.887526512146, \"Time in s\": 605.997132 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8855092904887472, \"F1\": 0.8701398066355985, \"Memory in Mb\": 50.45190334320069, \"Time in s\": 678.971724 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8856097135885668, \"F1\": 0.8680206448153361, \"Memory in Mb\": 42.41952419281006, \"Time in s\": 756.021088 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854241404050996, \"F1\": 0.8677032882997705, \"Memory in Mb\": 44.88027477264404, \"Time in s\": 837.367748 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8873587385019711, \"F1\": 0.8687128591557923, \"Memory in Mb\": 30.162745475769043, \"Time in s\": 922.595636 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": 
\"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.887612262304952, \"F1\": 0.8700243704305443, \"Memory in Mb\": 11.138346672058104, \"Time in s\": 1011.395865 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8870758746460623, \"F1\": 0.8696182190945864, \"Memory in Mb\": 16.41995906829834, \"Time in s\": 1103.9159909999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8869981143356482, \"F1\": 0.8677539157112869, \"Memory in Mb\": 20.066325187683105, \"Time in s\": 1200.011486 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854254050951477, \"F1\": 0.8647944563121971, \"Memory in Mb\": 23.948283195495605, \"Time in s\": 1299.755313 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836765018042878, \"F1\": 0.8620481321115698, \"Memory in Mb\": 27.3106107711792, \"Time in s\": 1403.457577 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826294918441601, \"F1\": 0.859903381642512, \"Memory in Mb\": 30.048666954040527, \"Time in s\": 1511.388245 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8818149564394686, \"F1\": 0.8590105342362679, \"Memory in Mb\": 27.528754234313965, \"Time in s\": 1623.579427 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882883568682678, \"F1\": 0.8599899895345134, \"Memory in Mb\": 32.44980525970459, \"Time in s\": 1739.735045 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8840649030501491, \"F1\": 0.8618891080429543, \"Memory in Mb\": 36.050021171569824, \"Time in s\": 1859.93783 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826063735089905, \"F1\": 0.8595287801968386, \"Memory in Mb\": 42.42660045623779, \"Time in s\": 1984.354383 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8825842502845711, \"F1\": 0.8587200132813148, \"Memory in Mb\": 47.772982597351074, \"Time in s\": 2113.006569 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8824296752182493, \"F1\": 0.8583175460518361, \"Memory in Mb\": 47.37248516082764, \"Time in s\": 2246.236218 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8822517287277213, \"F1\": 0.8574348492590699, \"Memory in Mb\": 51.80053234100342, \"Time in s\": 2383.776877 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8812324576618625, \"F1\": 0.8558965332517028, \"Memory in Mb\": 57.62951564788818, \"Time in s\": 2525.746928 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8799018856354438, \"F1\": 0.8544785823085782, \"Memory in Mb\": 57.82863521575928, \"Time in s\": 2672.122785 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8803436651651204, \"F1\": 0.8553604269589989, \"Memory in Mb\": 49.21776485443115, \"Time in s\": 2822.5668949999995 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8800360182414965, \"F1\": 0.8549248278769145, \"Memory in Mb\": 40.49333477020264, \"Time in s\": 2976.721336 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8794045226841762, \"F1\": 0.8535789148139239, \"Memory in Mb\": 46.44182109832764, \"Time in s\": 3134.389793 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8791081431606832, \"F1\": 0.8524468694217102, \"Memory in Mb\": 49.9929723739624, \"Time in s\": 3295.654522 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8779378112801185, \"F1\": 0.8505800158186132, \"Memory in Mb\": 54.79160213470459, \"Time in s\": 3460.6177849999995 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8777693096107855, \"F1\": 0.8499532212794787, \"Memory in Mb\": 58.49489498138428, \"Time in s\": 3629.3978659999993 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784814025720666, \"F1\": 0.8511414376454312, \"Memory in Mb\": 60.34530162811279, \"Time in s\": 3802.324627 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8790106113438527, \"F1\": 0.8529528339278637, \"Memory in Mb\": 65.39763927459717, \"Time in s\": 3979.298316 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8795408275895902, \"F1\": 0.8548286972715718, \"Memory in Mb\": 71.3544225692749, \"Time in s\": 4160.658093999999 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8803359328134374, \"F1\": 0.8566995201287319, \"Memory in Mb\": 58.17950344085693, \"Time in s\": 4346.269614 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8808858411028393, \"F1\": 0.8576000898422146, \"Memory in Mb\": 62.87830638885498, \"Time in s\": 4535.872427 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8812518683744567, \"F1\": 0.8580850829943938, \"Memory in Mb\": 52.03429698944092, \"Time in s\": 4729.486494000001 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8814452729033857, \"F1\": 0.8579065309538595, \"Memory in Mb\": 55.38854122161865, \"Time in s\": 4926.889749000001 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8822490562705578, \"F1\": 0.8591125198098257, \"Memory in Mb\": 58.343642234802246, \"Time in s\": 5128.273015000001 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.75, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.6668167114257812, \"Time in s\": 0.240714 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7959183673469388, \"F1\": 0.7826086956521738, \"Memory in Mb\": 1.0995216369628906, \"Time in s\": 0.6628620000000001 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8378378378378378, \"F1\": 0.8378378378378377, \"Memory in Mb\": 1.2478713989257812, \"Time in s\": 1.252732 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8686868686868687, \"F1\": 0.8686868686868686, \"Memory in Mb\": 1.3291473388671875, \"Time in s\": 2.007362 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8629032258064516, \"F1\": 0.8640000000000001, \"Memory in Mb\": 1.6638565063476562, \"Time in s\": 2.948271 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8657718120805369, \"F1\": 0.8701298701298702, \"Memory in Mb\": 1.6782913208007812, \"Time in s\": 4.062992 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850574712643678, \"F1\": 0.8809523809523809, \"Memory in Mb\": 1.7604293823242188, \"Time in s\": 5.350090000000001 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844221105527639, \"F1\": 0.8795811518324608, \"Memory in Mb\": 1.9450531005859373, \"Time in s\": 6.8260000000000005 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8883928571428571, \"F1\": 0.8803827751196173, \"Memory in Mb\": 2.0522689819335938, \"Time in s\": 8.475057 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795180722891566, \"F1\": 0.8695652173913043, \"Memory in Mb\": 2.253402709960937, \"Time in s\": 10.305544 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795620437956204, \"F1\": 0.8685258964143425, \"Memory in Mb\": 2.2874794006347656, \"Time in s\": 12.343079 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795986622073578, \"F1\": 0.8666666666666666, \"Memory in Mb\": 2.546089172363281, \"Time in s\": 14.59325 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8796296296296297, \"F1\": 0.8641114982578397, \"Memory in Mb\": 2.7360763549804688, \"Time in s\": 17.040637 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8739255014326648, \"F1\": 0.8562091503267973, \"Memory in Mb\": 2.827232360839844, \"Time in s\": 19.684707 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8743315508021391, \"F1\": 0.8553846153846153, \"Memory in Mb\": 3.0366439819335938, \"Time in s\": 22.544768 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8721804511278195, \"F1\": 0.8513119533527697, \"Memory in Mb\": 3.1284713745117188, \"Time in s\": 25.604048 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875, \"F1\": 0.8515406162464987, \"Memory in Mb\": 3.107044219970703, \"Time in s\": 28.861271 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.851063829787234, \"Memory in Mb\": 3.134662628173828, \"Time in s\": 32.309061 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8557213930348259, \"Memory in Mb\": 3.1429481506347656, \"Time in s\": 35.963049 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8817635270541082, \"F1\": 0.8624708624708626, \"Memory in Mb\": 3.273334503173828, \"Time in s\": 39.811295 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8854961832061069, \"F1\": 0.8642533936651584, \"Memory in Mb\": 3.4039268493652344, \"Time in s\": 43.854378 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8834244080145719, \"F1\": 0.8626609442060086, \"Memory in Mb\": 3.5256080627441406, \"Time in s\": 48.092981 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832752613240418, \"F1\": 0.8618556701030927, \"Memory in Mb\": 3.730621337890625, \"Time in s\": 52.525854 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8864774624373957, \"F1\": 0.8634538152610441, \"Memory in Mb\": 3.6573638916015625, \"Time in s\": 57.153422000000006 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8862179487179487, \"F1\": 0.8605108055009822, \"Memory in Mb\": 3.691375732421875, \"Time in s\": 61.980237 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.889060092449923, \"F1\": 0.8656716417910448, \"Memory in Mb\": 3.879222869873047, \"Time in s\": 67.019405 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887240356083086, \"F1\": 0.8681898066783831, \"Memory in Mb\": 4.001224517822266, \"Time in s\": 72.2172 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8927038626609443, \"F1\": 
0.8713550600343053, \"Memory in Mb\": 4.033683776855469, \"Time in s\": 77.519148 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893646408839779, \"F1\": 0.8743882544861339, \"Memory in Mb\": 4.112117767333984, \"Time in s\": 82.931309 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958611481975968, \"F1\": 0.8773584905660378, \"Memory in Mb\": 4.362846374511719, \"Time in s\": 88.455864 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8953488372093024, \"F1\": 0.8763358778625954, \"Memory in Mb\": 4.623798370361328, \"Time in s\": 94.093371 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8936170212765957, \"F1\": 0.8755490483162518, \"Memory in Mb\": 4.795074462890625, \"Time in s\": 99.843891 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932038834951457, \"F1\": 0.876056338028169, \"Memory in Mb\": 5.260898590087891, \"Time in s\": 105.71761 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8939929328621908, \"F1\": 0.8767123287671234, \"Memory in Mb\": 5.305454254150391, \"Time in s\": 111.707119 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958810068649885, \"F1\": 0.8781793842034805, \"Memory in Mb\": 5.302814483642578, \"Time in s\": 117.814416 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8976640711902113, \"F1\": 0.8795811518324608, \"Memory in Mb\": 5.489250183105469, \"Time in s\": 124.034882 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9004329004329005, \"F1\": 0.8838383838383839, \"Memory in Mb\": 5.587982177734375, \"Time in s\": 130.365318 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9020021074815596, \"F1\": 0.8869987849331712, \"Memory in Mb\": 5.680080413818359, \"Time in s\": 136.807919 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.904517453798768, \"F1\": 0.889679715302491, \"Memory in Mb\": 5.6697998046875, \"Time in s\": 143.36438 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9049049049049048, \"F1\": 0.8901734104046244, \"Memory in Mb\": 5.689472198486328, \"Time in s\": 150.035681 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9052734375, \"F1\": 0.8911335578002244, \"Memory in Mb\": 5.899868011474609, \"Time in s\": 156.818681 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9065776930409916, \"F1\": 0.8930131004366813, \"Memory in Mb\": 6.014961242675781, 
\"Time in s\": 163.714224 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9068901303538176, \"F1\": 0.8940677966101694, \"Memory in Mb\": 6.185920715332031, \"Time in s\": 170.723809 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.908098271155596, \"F1\": 0.8957688338493291, \"Memory in Mb\": 6.174674987792969, \"Time in s\": 177.84336199999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.909252669039146, \"F1\": 0.8979999999999999, \"Memory in Mb\": 6.282234191894531, \"Time in s\": 185.077801 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9103568320278504, \"F1\": 0.8991185112634672, \"Memory in Mb\": 6.438121795654297, \"Time in s\": 192.421784 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9080068143100511, \"F1\": 0.8963531669865643, \"Memory in Mb\": 6.65753173828125, \"Time in s\": 199.885294 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9090909090909092, \"F1\": 0.8972667295004714, \"Memory in Mb\": 6.8576507568359375, \"Time in s\": 207.46223300000003 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9101307189542484, \"F1\": 0.898336414048059, \"Memory in Mb\": 6.963230133056641, \"Time in s\": 215.149468 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.911128903122498, \"F1\": 0.8999098286744815, \"Memory in Mb\": 7.0904388427734375, \"Time in s\": 222.947789 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.169290542602539, \"Time in s\": 5.29779 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.170511245727539, \"Time in s\": 13.064902 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 23.298967 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 35.999469999999995 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 51.163715 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.172952651977539, \"Time in s\": 68.795189 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 
0.172952651977539, \"Time in s\": 88.896271 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029297773108, \"F1\": 0.8421052631578948, \"Memory in Mb\": 0.457615852355957, \"Time in s\": 111.604078 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248277472848, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4529237747192383, \"Time in s\": 137.211141 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423458931104, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4529237747192383, \"Time in s\": 165.706974 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566787693484, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 197.110825 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999868622728268, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 231.41631 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787290807664, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 268.619505 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498554859052, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 308.72625800000003 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999859865470852, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 351.754387 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686241665844, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 397.678816 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998763523956724, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847612380981445, \"Time in s\": 446.503909 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832219075702, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.484715461730957, \"Time in s\": 498.235456 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998893682929528, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.484715461730957, \"Time in s\": 552.8655449999999 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998949000236474, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847383499145508, \"Time in s\": 610.4097629999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998999049096642, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847383499145508, \"Time in s\": 
670.8597949999998 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904454795175, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 734.2226509999998 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999908609029428, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 800.4598269999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124170699132, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 869.5777599999998 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738806911338, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.5104570388793945, \"Time in s\": 941.5744449999996 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787315318228, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.5104570388793945, \"Time in s\": 1016.4635609999998 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999883223043986, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.593510627746582, \"Time in s\": 1094.2664539999998 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998873937278306, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.593510627746582, \"Time in s\": 1174.983157 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998912767730946, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.6139116287231445, \"Time in s\": 1258.5948749999998 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997722853789696, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0072031021118164, \"Time in s\": 1345.245196 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997796311364252, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0209360122680664, \"Time in s\": 1434.9545749999995 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997865177765004, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0209360122680664, \"Time in s\": 1527.6493129999997 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997611388897736, \"F1\": 0.6511627906976744, \"Memory in Mb\": 1.1647844314575195, \"Time in s\": 1623.3843759999995 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997681643251264, \"F1\": 0.6511627906976744, \"Memory in Mb\": 1.185868263244629, \"Time in s\": 1722.1888749999998 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999774788301003, \"F1\": 0.6511627906976744, \"Memory in Mb\": 
1.195298194885254, \"Time in s\": 1824.040623 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781044272848, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139558792114258, \"Time in s\": 1928.948048 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99978696207925, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139787673950195, \"Time in s\": 2036.917723 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99979256841785, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139787673950195, \"Time in s\": 2147.927521 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978872480328, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2303056716918943, \"Time in s\": 2261.972689 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029401332124, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2302827835083008, \"Time in s\": 2379.0496510000003 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998077465330292, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2303743362426758, \"Time in s\": 2499.17284 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123240538004, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2315950393676758, \"Time in s\": 2622.406245 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999816688664027, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2398500442504885, \"Time in s\": 2748.681939 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998208548805102, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 2878.007463 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824835929654, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 3010.373781 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998286438877276, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 3145.77577 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998211091234348, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.338292121887207, \"Time in s\": 3284.247438 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248360574976, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.3284997940063477, \"Time in s\": 3425.773292 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998284108701714, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.4100160598754885, \"Time in s\": 
3570.387533 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998318426888354, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.4101762771606443, \"Time in s\": 3718.075789 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7619047619047619, \"F1\": 0.736842105263158, \"Memory in Mb\": 0.041853904724121, \"Time in s\": 0.035102 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8199052132701422, \"F1\": 0.7978723404255319, \"Memory in Mb\": 0.0413503646850585, \"Time in s\": 0.119282 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8391167192429022, \"F1\": 0.8197879858657242, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.252001 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8581560283687943, \"F1\": 0.8412698412698413, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.433555 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8525519848771267, \"F1\": 0.8266666666666667, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 0.663618 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488188976377953, \"F1\": 0.8222222222222222, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.942176 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8461538461538461, \"F1\": 0.8155339805825242, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 1.268723 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488783943329398, \"F1\": 0.8217270194986072, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 1.64013 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8541448058761805, \"F1\": 0.8268991282689911, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 2.060089 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8583569405099151, \"F1\": 0.8299319727891157, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 2.5284690000000003 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8549356223175966, \"F1\": 0.8263103802672147, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 3.0446800000000005 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8575924468922108, \"F1\": 0.8309990662931841, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 3.608857 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8576615831517792, \"F1\": 0.8298611111111109, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 4.221075000000001 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8604180714767363, \"F1\": 0.833467417538214, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 4.881183000000001 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8590308370044053, \"F1\": 0.8318318318318318, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 5.5893250000000005 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8613569321533924, \"F1\": 0.8341566690190544, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 6.345487 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8617434758467518, \"F1\": 0.836076366030283, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 7.149425000000001 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8573675930781332, \"F1\": 0.8329238329238329, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 8.001348 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8544461003477397, \"F1\": 0.8317059161401493, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 8.901356 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8551203397829165, \"F1\": 0.8343227199136536, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 9.849324 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8512359550561798, \"F1\": 0.8301693175987686, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 10.845354 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8511368511368511, \"F1\": 0.8304836345872008, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 11.889459 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8518670496512105, \"F1\": 0.8312295465170642, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 12.981528999999998 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8509634290208415, \"F1\": 0.8307280035730238, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 14.121683999999998 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8501321253303133, \"F1\": 0.8307036247334755, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 15.309940999999998 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8500907441016334, \"F1\": 0.8310838445807771, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 16.545870999999998 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8504019573575673, \"F1\": 0.8310970797158642, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 17.828975 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.8513650151668352, \"F1\": 0.8317436093094239, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 19.159858 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490074845427921, \"F1\": 0.8294117647058825, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 20.53851 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8499528153507392, \"F1\": 0.8298251872993221, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 21.964265 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8502283105022831, \"F1\": 0.8295218295218295, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 23.437226 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8493069890887643, \"F1\": 0.8294961628294961, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 24.957963 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8507291964541035, \"F1\": 0.830299089726918, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 26.526512 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.852345267832362, \"F1\": 0.8313253012048194, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 28.143271 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8522512806686439, \"F1\": 0.8315918869084205, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 29.808071 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8521625163826999, \"F1\": 0.8315412186379928, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 31.520677 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8520785513899516, \"F1\": 0.8309037900874635, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 33.281115 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8505090638192203, \"F1\": 0.8290743895513913, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 35.088929 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8499879022501815, \"F1\": 0.8286346047540077, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 36.944224 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.851380042462845, \"F1\": 0.8306451612903226, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 38.847373 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8515535097813579, \"F1\": 0.8308418568056648, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 40.797646 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8508200404403505, \"F1\": 0.8296562339661364, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 42.797836 }, 
{ \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8507790212859337, \"F1\": 0.8302546180728907, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 44.846666000000006 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8496675959682608, \"F1\": 0.8294818778885916, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 46.942725 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.848605577689243, \"F1\": 0.8280133396855646, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 49.086211000000006 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.848, \"F1\": 0.8265855370933771, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 51.27634200000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490262999397711, \"F1\": 0.8281535648994516, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 53.513903000000006 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.84863377236092, \"F1\": 0.8275089605734768, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 55.79899700000001 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488349701521278, \"F1\": 0.8278131169116035, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 58.13196400000001 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8484619739573505, \"F1\": 0.8274231678486997, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 60.51276400000001 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049723756906076, \"F1\": 0.903153153153153, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 0.342387 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9260077305356156, \"F1\": 0.9075862068965518, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 1.0313949999999998 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9120353330879648, \"F1\": 0.8907178783721992, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 2.062314 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9149875793541264, \"F1\": 0.8948087431693988, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 3.434682 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9116802826230956, \"F1\": 0.8866855524079319, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 5.146577 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9098436062557498, \"F1\": 0.8844884488448844, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 7.219511 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9090048888187984, \"F1\": 0.8844382134988984, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 9.664227 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064440458120602, \"F1\": 0.881509961551905, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 12.478866 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9053109284925794, \"F1\": 0.8852556480380499, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 15.66161 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9076056959929352, \"F1\": 0.8903732809430256, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 19.203614 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9092824887104868, \"F1\": 0.8943431510051426, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 23.103248 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9103118388372736, \"F1\": 0.8971193415637859, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 27.356535 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.909484588604908, \"F1\": 0.896323672437269, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 31.965636 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9086178348971063, \"F1\": 0.8953120765965135, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 36.933351 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.908970490838178, \"F1\": 0.8970796239287795, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 42.24853099999999 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9087271472921696, \"F1\": 0.8973861785464982, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 47.913716 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9094863969872086, \"F1\": 0.8977556109725686, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 53.917574 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9067271723799596, \"F1\": 0.8941323867195656, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 60.251802 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9044907918433742, \"F1\": 0.8901656867985035, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 66.911875 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9041889729013742, \"F1\": 0.8895674300254454, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 73.89174799999999 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049145860709592, \"F1\": 0.8893239522789844, 
\"Memory in Mb\": 0.0684490203857421, \"Time in s\": 81.18615299999999 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9037679995986152, \"F1\": 0.8889403590040533, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 88.788198 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9006094927292796, \"F1\": 0.8855990719770204, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 96.688698 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8996918548498367, \"F1\": 0.8828112406641234, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 104.88540299999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8992891518389333, \"F1\": 0.8815987542174929, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 113.378449 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8977711738484399, \"F1\": 0.8795999999999999, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 122.16799199999998 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8973059155390213, \"F1\": 0.8783652914971914, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 131.253708 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8955729885284031, \"F1\": 0.8765782975352934, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 140.635731 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962052297034979, \"F1\": 0.8771012663932579, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 150.314023 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.896133043894183, \"F1\": 0.8774792760730873, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 160.288049 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8954602100765533, \"F1\": 0.8762330326279403, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 170.561257 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8944500017246731, \"F1\": 0.874518166160912, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 181.130608 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8934006756530756, \"F1\": 0.8730025901574019, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 191.99638700000003 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8926403272408532, \"F1\": 0.8713780094123137, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 203.15792800000003 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8906304203853795, \"F1\": 0.8690233401314299, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 214.615558 }, { 
\"step\": 32616, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8895293576575195, \"F1\": 0.8679010082493126, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 226.368363 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8885773097461293, \"F1\": 0.8667926816220265, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 238.416278 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8875010892613355, \"F1\": 0.8655721772933948, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 250.759651 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88653666544024, \"F1\": 0.8635976999761832, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 263.39898200000005 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8864483015535749, \"F1\": 0.8623423543973505, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 276.33438000000007 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854219948849105, \"F1\": 0.8608149650075216, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 289.5663660000001 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.885574623531576, \"F1\": 0.8604308244646749, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 303.09499200000005 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8853864517288292, \"F1\": 0.8605515475186608, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 316.91924300000005 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8853071770815042, \"F1\": 0.8614545454545455, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 331.03949900000003 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8846672717015379, \"F1\": 0.8618034328709147, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 345.45792700000004 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8847030593881223, \"F1\": 0.862796607749636, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 360.17251400000004 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8848547474225593, \"F1\": 0.8633767102293309, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 375.183354 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8845632027962379, \"F1\": 0.86316305947773, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 390.490282 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8843511364404298, \"F1\": 0.8626023657870793, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 406.09386 }, { \"step\": 45300, \"track\": \"Binary classification\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8844345349786971, \"F1\": 0.8629042817860416, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 421.994249 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7499999999999999, \"Memory in Mb\": 0.0211858749389648, \"Time in s\": 0.008448 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7959183673469388, \"F1\": 0.8076923076923077, \"Memory in Mb\": 0.0379018783569335, \"Time in s\": 0.023055 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8641975308641976, \"Memory in Mb\": 0.0541143417358398, \"Time in s\": 0.045431 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8484848484848485, \"F1\": 0.854368932038835, \"Memory in Mb\": 0.0708036422729492, \"Time in s\": 0.0780319999999999 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.859375, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.1221779999999999 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590604026845637, \"F1\": 0.8679245283018867, \"Memory in Mb\": 0.0707159042358398, \"Time in s\": 0.1780699999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8735632183908046, \"F1\": 0.8735632183908046, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.2457579999999999 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8693467336683417, \"F1\": 0.8686868686868686, \"Memory in Mb\": 0.0707159042358398, \"Time in s\": 0.325372 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8616071428571429, \"F1\": 0.8571428571428571, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.416902 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8416666666666667, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.520874 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8384615384615385, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 0.637346 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8494983277591973, \"F1\": 0.8375451263537907, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 0.76611 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8518518518518519, \"F1\": 0.8356164383561644, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 0.907061 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8538681948424068, 
\"F1\": 0.8349514563106796, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 1.060329 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8271604938271604, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.2260179999999998 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.849624060150376, \"F1\": 0.8224852071005918, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 1.4039159999999995 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8514150943396226, \"F1\": 0.8194842406876792, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.5942239999999996 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8552338530066815, \"F1\": 0.8219178082191781, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.7968749999999996 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8481012658227848, \"F1\": 0.8134715025906737, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 2.011871 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8476953907815631, \"F1\": 0.8164251207729469, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.23918 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8492366412213741, \"F1\": 0.8141176470588235, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 2.479037 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8506375227686703, \"F1\": 0.8177777777777777, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.731183 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8519163763066202, \"F1\": 0.8187633262260127, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.995642 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8514190317195326, \"F1\": 0.8149688149688149, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 3.272333 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8509615384615384, \"F1\": 0.8105906313645621, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 3.561223 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567026194144838, \"F1\": 0.8208092485549132, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 3.862355 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590504451038575, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 4.17585 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640915593705293, \"F1\": 0.831858407079646, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 
4.501572 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646408839779005, \"F1\": 0.8355704697986577, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 4.839746999999999 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8624833110814419, \"F1\": 0.8341384863123994, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 5.190338999999999 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8591731266149871, \"F1\": 0.8294209702660407, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 5.552793999999999 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8573216520650814, \"F1\": 0.8288288288288288, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 5.927242999999999 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567961165048543, \"F1\": 0.8299711815561961, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 6.313648999999999 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8598351001177856, \"F1\": 0.8330995792426368, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 6.712033999999999 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592677345537757, \"F1\": 0.8312757201646092, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 7.122354999999999 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587319243604005, \"F1\": 0.8304405874499332, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 7.544645999999999 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8593073593073594, \"F1\": 0.8324742268041236, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 7.978914999999999 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577449947312961, \"F1\": 0.8327137546468402, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 8.425289 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8613963039014374, \"F1\": 0.8367593712212819, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 8.883884 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8628628628628628, \"F1\": 0.8386336866902238, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 9.354461 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8623046875, \"F1\": 0.8384879725085911, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 9.837197 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86558627264061, \"F1\": 0.843159065628476, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 10.331855 }, { \"step\": 1075, \"track\": 
\"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640595903165735, \"F1\": 0.8426724137931035, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 10.838539999999998 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653321201091901, \"F1\": 0.8445378151260504, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 11.357143999999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8674377224199288, \"F1\": 0.8484231943031536, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 11.887570999999998 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8685813751087903, \"F1\": 0.8494516450648055, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 12.429893999999996 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679727427597955, \"F1\": 0.8484848484848486, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 12.984101999999996 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8673894912427023, \"F1\": 0.8478468899521532, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 13.550580999999996 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8676470588235294, \"F1\": 0.848030018761726, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 14.128897999999996 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670936749399519, \"F1\": 0.847985347985348, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 14.719141999999996 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 0.843623 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 2.551575 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 5.191815 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 8.760857 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 12.963128 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 17.55601 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 22.538869 }, { \"step\": 15224, \"track\": \"Binary classification\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343099257704, \"F1\": 0.9523809523809524, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 27.911978 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416092490948, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 33.678549000000004 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474486310368, \"F1\": 0.96, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 39.834148000000006 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522262564494, \"F1\": 0.96, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 46.380227000000005 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562075760894, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 53.31643100000001 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595763602556, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 60.64312500000001 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249277429526, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 68.360031 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999929932735426, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 76.46777800000001 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343120832924, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 84.96788600000001 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999381761978362, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 93.862161 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416109537852, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 103.14740500000002 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999446841464764, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 112.82379200000004 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474500118236, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 122.89276800000002 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999949952454832, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 133.353338 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522273975876, \"F1\": 0.923076923076923, \"Memory in 
Mb\": 0.0438346862792968, \"Time in s\": 144.204522 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999954304514714, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 155.44662499999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562085349566, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 167.07809799999998 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999957960230378, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 179.099709 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595771772742, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 191.51086 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999941611521993, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 204.313165 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999436968639152, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 217.506745 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999456383865472, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 231.092317 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423514162098, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 245.069369 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999847436940602, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 259.43782 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998522046145004, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 274.196817 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824835185834, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 289.350286 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998299871717592, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 304.894594 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998348447540688, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 320.829102 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248354182784, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 337.153755 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998295696634, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 353.86875 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99983405473428, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 370.973732 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998383097984264, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 388.469718 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99984235210657, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 406.356452 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998461972264232, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 424.633943 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498592430404, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 443.301766 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998533509312216, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 462.36095 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566839044082, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 481.813199 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998598687437232, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 501.656136 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999862915110182, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 521.891566 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998434704830054, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 542.516877 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998467315503105, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 563.531742 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498595114, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 584.936913 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999852862352731, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 606.731321 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4857142857142857, \"F1\": 0.4599999999999999, \"Memory in Mb\": 
0.192514419555664, \"Time in s\": 0.128533 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165876777251185, \"F1\": 0.4574468085106383, \"Memory in Mb\": 0.193307876586914, \"Time in s\": 0.37692 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205047318611987, \"F1\": 0.4722222222222222, \"Memory in Mb\": 0.1939868927001953, \"Time in s\": 0.746299 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.4838709677419355, \"Memory in Mb\": 0.1940326690673828, \"Time in s\": 1.240314 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.55765595463138, \"F1\": 0.455813953488372, \"Memory in Mb\": 0.1940555572509765, \"Time in s\": 1.846868 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.4259634888438134, \"Memory in Mb\": 0.194711685180664, \"Time in s\": 2.5750140000000004 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748987854251012, \"F1\": 0.4220183486238532, \"Memory in Mb\": 0.1947574615478515, \"Time in s\": 3.4160590000000006 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5785123966942148, \"F1\": 0.4232633279483037, \"Memory in Mb\": 0.1946887969970703, \"Time in s\": 4.380047 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1946659088134765, \"Time in s\": 5.464953 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4146341463414634, \"Memory in Mb\": 0.1946659088134765, \"Time in s\": 6.672680000000001 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.4015056461731493, \"Memory in Mb\": 0.1946430206298828, \"Time in s\": 7.997066 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4101382488479262, \"Memory in Mb\": 0.1946430206298828, \"Time in s\": 9.441186 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.3973799126637554, \"Memory in Mb\": 0.1952533721923828, \"Time in s\": 11.005176 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.3926380368098159, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 12.688563 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.3896353166986563, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 14.491668 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, 
\"F1\": 0.3876811594202898, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 16.418884000000002 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6013325930038868, \"F1\": 0.3904923599320882, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 18.478568000000003 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.396812749003984, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 20.667158000000004 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.3961136023916292, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 22.989297000000004 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5969797074091553, \"F1\": 0.3994374120956399, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 25.441149000000003 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597752808988764, \"F1\": 0.4013377926421405, \"Memory in Mb\": 0.1951618194580078, \"Time in s\": 28.026090000000003 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5988845988845989, \"F1\": 0.4033184428844926, \"Memory in Mb\": 0.1951618194580078, \"Time in s\": 30.739590000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5995075913007797, \"F1\": 0.4019607843137255, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 33.582607 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008651199370821, \"F1\": 0.4088526499708794, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 36.555510000000005 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002265005662514, \"F1\": 0.4073866815892558, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 39.657922000000006 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4028077753779697, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 42.88800400000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599790283117791, \"F1\": 0.4051948051948052, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 46.243761000000006 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599932591843613, \"F1\": 0.4026170105686965, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 49.729156 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5977871786527823, \"F1\": 0.4023210831721469, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 53.340997 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986159169550173, \"F1\": 0.4042950513538749, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 57.083209 }, { 
\"step\": 3286, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5981735159817352, \"F1\": 0.4021739130434782, \"Memory in Mb\": 0.1913156509399414, \"Time in s\": 60.947023 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5959893836626364, \"F1\": 0.4022687609075043, \"Memory in Mb\": 0.2501306533813476, \"Time in s\": 64.942815 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597369173577352, \"F1\": 0.4023769100169779, \"Memory in Mb\": 0.2948274612426758, \"Time in s\": 69.00059 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008881487649181, \"F1\": 0.4087171052631579, \"Memory in Mb\": 0.3147249221801758, \"Time in s\": 73.119483 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6012402264761392, \"F1\": 0.4086365453818472, \"Memory in Mb\": 0.3621034622192383, \"Time in s\": 77.300669 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6023591087811271, \"F1\": 0.4104158569762923, \"Memory in Mb\": 0.3922090530395508, \"Time in s\": 81.542932 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6052027543993879, \"F1\": 0.4145234493192133, \"Memory in Mb\": 0.4281816482543945, \"Time in s\": 85.85207799999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608393344921778, \"F1\": 0.4195804195804196, \"Memory in Mb\": 0.4565858840942383, \"Time in s\": 90.22589 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6121461408178079, \"F1\": 0.4260651629072682, \"Memory in Mb\": 0.4708681106567383, \"Time in s\": 94.668198 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157112526539278, \"F1\": 0.4329968673860076, \"Memory in Mb\": 0.4722070693969726, \"Time in s\": 99.175385 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6186421173762946, \"F1\": 0.4384954252795662, \"Memory in Mb\": 0.4545450210571289, \"Time in s\": 103.74793499999998 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6212087171422153, \"F1\": 0.4420913302448709, \"Memory in Mb\": 0.4548578262329101, \"Time in s\": 108.38424899999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6214614878209348, \"F1\": 0.4437278297323443, \"Memory in Mb\": 0.4440469741821289, \"Time in s\": 113.085683 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6219172206733863, \"F1\": 0.4454230890217049, \"Memory in Mb\": 0.4133005142211914, \"Time in s\": 117.851137 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6227720696162717, \"F1\": 
0.4449244060475162, \"Memory in Mb\": 0.4420938491821289, \"Time in s\": 122.680723 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6235897435897436, \"F1\": 0.4444444444444444, \"Memory in Mb\": 0.4093866348266601, \"Time in s\": 127.574861 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6251756675366392, \"F1\": 0.4491000295072292, \"Memory in Mb\": 0.4095392227172851, \"Time in s\": 132.5326 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.624139964615687, \"F1\": 0.4467592592592592, \"Memory in Mb\": 0.4096612930297851, \"Time in s\": 137.553125 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6248796456768727, \"F1\": 0.4469051675184554, \"Memory in Mb\": 0.4100580215454101, \"Time in s\": 142.637397 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6259671636157765, \"F1\": 0.4482182628062361, \"Memory in Mb\": 0.4162149429321289, \"Time in s\": 147.78569 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8651933701657458, \"F1\": 0.8685344827586208, \"Memory in Mb\": 1.6044349670410156, \"Time in s\": 1.944742 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8890115958034235, \"F1\": 0.8678500986193294, \"Memory in Mb\": 1.9405479431152344, \"Time in s\": 5.818992 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8778064041221936, \"F1\": 0.8540017590149517, \"Memory in Mb\": 1.825298309326172, \"Time in s\": 10.85362 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8857300579630141, \"F1\": 0.8630952380952381, \"Memory in Mb\": 1.6178092956542969, \"Time in s\": 16.937274 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887171561051005, \"F1\": 0.8605423353624794, \"Memory in Mb\": 2.356822967529297, \"Time in s\": 24.006141 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.884820607175713, \"F1\": 0.8560257589696412, \"Memory in Mb\": 2.6076393127441406, \"Time in s\": 32.019509 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8821952373442674, \"F1\": 0.8529238038984053, \"Memory in Mb\": 2.2810935974121094, \"Time in s\": 40.994451 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780184904098247, \"F1\": 0.8452380952380951, \"Memory in Mb\": 2.1880455017089844, \"Time in s\": 50.877339 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797988470501655, \"F1\": 0.8548148148148149, \"Memory in Mb\": 2.109683990478516, \"Time in s\": 61.695664 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.8818854178165361, \"F1\": 0.8612191958495461, \"Memory in Mb\": 2.1360397338867188, \"Time in s\": 73.45561000000001 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8771700953336679, \"F1\": 0.8589861751152074, \"Memory in Mb\": 1.9618644714355469, \"Time in s\": 86.17474100000001 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8781160886762948, \"F1\": 0.8612710710920323, \"Memory in Mb\": 1.9593772888183596, \"Time in s\": 99.82607500000002 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8748407913730152, \"F1\": 0.8565868846079004, \"Memory in Mb\": 2.148365020751953, \"Time in s\": 114.35339900000002 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8732161160608689, \"F1\": 0.8548736462093863, \"Memory in Mb\": 1.7726364135742188, \"Time in s\": 129.76244800000003 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8749724041504158, \"F1\": 0.8586404858973291, \"Memory in Mb\": 1.6236610412597656, \"Time in s\": 146.05238800000004 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8746464298033805, \"F1\": 0.858917617827471, \"Memory in Mb\": 1.996196746826172, \"Time in s\": 163.22720400000003 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8757223556911888, \"F1\": 0.859347442680776, \"Memory in Mb\": 1.8728065490722656, \"Time in s\": 181.335909 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8744097626786043, \"F1\": 0.8568432825387949, \"Memory in Mb\": 2.1095123291015625, \"Time in s\": 200.414984 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8735839191308894, \"F1\": 0.8532703978422117, \"Memory in Mb\": 2.2479400634765625, \"Time in s\": 220.47164 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8715712787681439, \"F1\": 0.8510338646693554, \"Memory in Mb\": 2.008777618408203, \"Time in s\": 241.502757 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.872904073587385, \"F1\": 0.8509615384615385, \"Memory in Mb\": 0.9681053161621094, \"Time in s\": 263.516635 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8660378305152777, \"F1\": 0.8433282478582326, \"Memory in Mb\": 0.8690452575683594, \"Time in s\": 286.595649 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8576090608052983, \"F1\": 0.8330050092868801, \"Memory in Mb\": 0.6502227783203125, \"Time in s\": 310.739453 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8574713700961228, \"F1\": 0.8301452452726775, \"Memory in Mb\": 0.7763557434082031, \"Time in s\": 335.928343 }, { \"step\": 22650, \"track\": 
\"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8562850456973817, \"F1\": 0.8269077373039085, \"Memory in Mb\": 1.191143035888672, \"Time in s\": 362.154691 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8503502441095309, \"F1\": 0.8183645076518782, \"Memory in Mb\": 1.1728401184082031, \"Time in s\": 389.483038 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8480438248640694, \"F1\": 0.8142707240293808, \"Memory in Mb\": 1.1973991394042969, \"Time in s\": 417.874563 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443647258248906, \"F1\": 0.8102105566772425, \"Memory in Mb\": 1.3723030090332031, \"Time in s\": 447.337578 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8447074943858714, \"F1\": 0.8100558659217878, \"Memory in Mb\": 1.314678192138672, \"Time in s\": 477.8580079999999 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8453953419919791, \"F1\": 0.8118395128067348, \"Memory in Mb\": 1.0832901000976562, \"Time in s\": 509.408593 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8421221292504896, \"F1\": 0.8068142209829209, \"Memory in Mb\": 1.0510520935058594, \"Time in s\": 541.9996819999999 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.839570901314201, \"F1\": 0.8022113544546035, \"Memory in Mb\": 1.0500526428222656, \"Time in s\": 575.6711059999999 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8373749874569355, \"F1\": 0.7989247311827957, \"Memory in Mb\": 1.017780303955078, \"Time in s\": 610.3966439999999 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8364445021588807, \"F1\": 0.7961149332254148, \"Memory in Mb\": 1.2030601501464844, \"Time in s\": 646.1707399999999 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8328234885994512, \"F1\": 0.7904660263251513, \"Memory in Mb\": 1.1833610534667969, \"Time in s\": 683.0083409999999 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8277479687260463, \"F1\": 0.7829714903809009, \"Memory in Mb\": 0.9862403869628906, \"Time in s\": 720.8979669999999 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8274216163002297, \"F1\": 0.7829675483023824, \"Memory in Mb\": 0.977802276611328, \"Time in s\": 759.8188009999999 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8249339181456415, \"F1\": 0.7797229633419832, \"Memory in Mb\": 1.1589393615722656, \"Time in s\": 799.8066919999999 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8250643873998811, \"F1\": 
0.7787838660033642, \"Memory in Mb\": 1.467632293701172, \"Time in s\": 840.8572599999999 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8262093324870995, \"F1\": 0.7784422711602055, \"Memory in Mb\": 1.2339591979980469, \"Time in s\": 882.9239929999999 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8240409207161126, \"F1\": 0.7737625475943233, \"Memory in Mb\": 1.2780303955078125, \"Time in s\": 926.004901 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8238679666763029, \"F1\": 0.7722731906218145, \"Memory in Mb\": 1.326324462890625, \"Time in s\": 970.056117 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8231383320070847, \"F1\": 0.7715365740433715, \"Memory in Mb\": 1.127643585205078, \"Time in s\": 1015.162858 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8224920352206306, \"F1\": 0.7721975404030647, \"Memory in Mb\": 0.9921760559082032, \"Time in s\": 1061.343491 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8226348451028969, \"F1\": 0.774362654850688, \"Memory in Mb\": 0.6841468811035156, \"Time in s\": 1108.5828599999998 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8231073785242952, \"F1\": 0.7766331353775299, \"Memory in Mb\": 0.6752357482910156, \"Time in s\": 1156.8826949999998 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8236537422794205, \"F1\": 0.7780569266692283, \"Memory in Mb\": 0.8912887573242188, \"Time in s\": 1206.175932 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8242693218663049, \"F1\": 0.7790690951141949, \"Memory in Mb\": 0.8946151733398438, \"Time in s\": 1256.4285579999998 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8235082107539476, \"F1\": 0.7769013924086677, \"Memory in Mb\": 0.9767723083496094, \"Time in s\": 1307.6797829999998 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.823285282235811, \"F1\": 0.7772366773340753, \"Memory in Mb\": 0.7331352233886719, \"Time in s\": 1359.9622719999998 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7083333333333334, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.6845798492431641, \"Time in s\": 0.140915 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.6852588653564453, \"Time in s\": 0.368444 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8493150684931507, \"Memory in Mb\": 0.6852130889892578, \"Time in s\": 0.67832 }, { \"step\": 100, \"track\": \"Binary classification\", 
\"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8585858585858586, \"F1\": 0.8541666666666666, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 1.071639 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.85, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 1.547289 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523489932885906, \"F1\": 0.8533333333333335, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 2.105688 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8620689655172413, \"F1\": 0.8536585365853658, \"Memory in Mb\": 0.6864566802978516, \"Time in s\": 2.749261 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592964824120602, \"F1\": 0.8510638297872339, \"Memory in Mb\": 0.6865940093994141, \"Time in s\": 3.487233 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8405797101449276, \"Memory in Mb\": 0.7248620986938477, \"Time in s\": 4.314044 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8347826086956521, \"Memory in Mb\": 0.7525568008422852, \"Time in s\": 5.225865 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8333333333333335, \"Memory in Mb\": 0.7526025772094727, \"Time in s\": 6.222143 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8528428093645485, \"F1\": 0.837037037037037, \"Memory in Mb\": 0.7526025772094727, \"Time in s\": 7.300394 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8611111111111112, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.7532129287719727, \"Time in s\": 8.460934 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653295128939829, \"F1\": 0.8438538205980067, \"Memory in Mb\": 0.7532358169555664, \"Time in s\": 9.705145 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8663101604278075, \"F1\": 0.8427672955974843, \"Memory in Mb\": 0.7908792495727539, \"Time in s\": 11.033679 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671679197994987, \"F1\": 0.8417910447761194, \"Memory in Mb\": 0.8290948867797852, \"Time in s\": 12.4564 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.839080459770115, \"Memory in Mb\": 0.8842554092407227, \"Time in s\": 13.968094999999998 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8708240534521158, \"F1\": 0.8406593406593408, \"Memory in Mb\": 0.8843240737915039, \"Time in s\": 15.559030999999996 }, { \"step\": 475, 
\"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.869198312236287, \"F1\": 0.8402061855670103, \"Memory in Mb\": 0.8843927383422852, \"Time in s\": 17.224721999999996 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8413461538461539, \"Memory in Mb\": 0.8844156265258789, \"Time in s\": 18.966559 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8683206106870229, \"F1\": 0.8384074941451991, \"Memory in Mb\": 0.8844156265258789, \"Time in s\": 20.785336 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670309653916212, \"F1\": 0.8381374722838136, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 22.682567 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867595818815331, \"F1\": 0.8382978723404255, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 24.656325 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8697829716193656, \"F1\": 0.8381742738589212, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 26.722162 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8717948717948718, \"F1\": 0.8373983739837398, \"Memory in Mb\": 0.9222650527954102, \"Time in s\": 28.872863 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8767334360554699, \"F1\": 0.846153846153846, \"Memory in Mb\": 0.9229669570922852, \"Time in s\": 31.097205 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8753709198813057, \"F1\": 0.8478260869565216, \"Memory in Mb\": 0.9505243301391602, \"Time in s\": 33.398985 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798283261802575, \"F1\": 0.8515901060070671, \"Memory in Mb\": 0.8879518508911133, \"Time in s\": 35.778321 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8576214405360134, \"Memory in Mb\": 0.9880342483520508, \"Time in s\": 38.239126 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8865153538050734, \"F1\": 0.8631239935587761, \"Memory in Mb\": 1.0254030227661133, \"Time in s\": 40.785592 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875968992248062, \"F1\": 0.863849765258216, \"Memory in Mb\": 1.0804262161254885, \"Time in s\": 43.415615 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873591989987485, \"F1\": 0.8652694610778443, \"Memory in Mb\": 1.1831789016723633, \"Time in s\": 46.140861 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.8661870503597122, \"Memory in Mb\": 
1.1837968826293943, \"Time in s\": 48.939828 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8671328671328671, \"Memory in Mb\": 1.1943635940551758, \"Time in s\": 51.816843 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8688524590163934, \"Memory in Mb\": 1.2218294143676758, \"Time in s\": 54.767134 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8670212765957446, \"Memory in Mb\": 1.2768526077270508, \"Time in s\": 57.795454 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8896103896103896, \"F1\": 0.8695652173913043, \"Memory in Mb\": 1.2769441604614258, \"Time in s\": 60.902165 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893572181243414, \"F1\": 0.8708487084870848, \"Memory in Mb\": 1.2769899368286133, \"Time in s\": 64.084493 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901437371663244, \"F1\": 0.8718562874251498, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 67.34321899999999 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878878878878879, \"F1\": 0.8697674418604652, \"Memory in Mb\": 1.277012825012207, \"Time in s\": 70.663215 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8876953125, \"F1\": 0.8700564971751412, \"Memory in Mb\": 1.2770586013793943, \"Time in s\": 74.022235 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8894184938036225, \"F1\": 0.8725274725274725, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 77.421815 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901303538175046, \"F1\": 0.8742004264392325, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 80.860683 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89171974522293, \"F1\": 0.8761706555671176, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 84.339469 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932384341637011, \"F1\": 0.8790322580645162, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 87.855325 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8938207136640557, \"F1\": 0.8794466403162056, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 91.408321 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926746166950597, \"F1\": 0.877906976744186, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 95.001152 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 
0.8932443703085905, \"F1\": 0.8783269961977186, \"Memory in Mb\": 1.2872819900512695, \"Time in s\": 98.634357 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929738562091504, \"F1\": 0.8779123951537745, \"Memory in Mb\": 1.3422365188598633, \"Time in s\": 102.306286 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8935148118494796, \"F1\": 0.8792007266121706, \"Memory in Mb\": 1.3423280715942385, \"Time in s\": 106.015532 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1738767623901367, \"Time in s\": 1.523397 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1744871139526367, \"Time in s\": 4.675035 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750288009643554, \"Time in s\": 8.984261 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750516891479492, \"Time in s\": 14.031006 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750516891479492, \"Time in s\": 19.82114 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1756620407104492, \"Time in s\": 26.34929 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1756849288940429, \"Time in s\": 33.615204 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999343099257702, \"F1\": 0.1666666666666666, \"Memory in Mb\": 0.4191761016845703, \"Time in s\": 41.690009 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993109891392, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.4156208038330078, \"Time in s\": 50.773074 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999369383572442, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.3918170928955078, \"Time in s\": 60.837183 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994267150773934, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.392496109008789, \"Time in s\": 71.877708 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474490913072, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.3925189971923828, \"Time in s\": 83.900187 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149163230658, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.4046955108642578, \"Time in s\": 96.90004 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", 
\"dataset\": \"SMTP\", \"Accuracy\": 0.9995495664577156, \"F1\": 0.25, \"Memory in Mb\": 0.421091079711914, \"Time in s\": 110.876026 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999579596412556, \"F1\": 0.25, \"Memory in Mb\": 0.4210453033447265, \"Time in s\": 125.82831600000002 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058724997536, \"F1\": 0.25, \"Memory in Mb\": 0.4210681915283203, \"Time in s\": 141.756728 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999629057187017, \"F1\": 0.25, \"Memory in Mb\": 0.433267593383789, \"Time in s\": 158.664658 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496657227104, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 176.542575 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996681048788584, \"F1\": 0.25, \"Memory in Mb\": 0.445535659790039, \"Time in s\": 195.393511 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847000709423, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 215.224785 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997147289926, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 236.028977 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133643855248, \"F1\": 0.25, \"Memory in Mb\": 0.4461231231689453, \"Time in s\": 257.804564 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997258270882836, \"F1\": 0.25, \"Memory in Mb\": 0.4461002349853515, \"Time in s\": 280.547182 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372512097392, \"F1\": 0.25, \"Memory in Mb\": 0.4461002349853515, \"Time in s\": 304.26359 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477613822676, \"F1\": 0.25, \"Memory in Mb\": 0.4643878936767578, \"Time in s\": 328.95641 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574630636458, \"F1\": 0.25, \"Memory in Mb\": 0.4765186309814453, \"Time in s\": 354.62865 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997469832619696, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5477771759033203, \"Time in s\": 381.285293 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756019743633, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5477771759033203, \"Time in s\": 408.929269 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997644330083716, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5598621368408203, \"Time in s\": 437.552385 }, 
{ \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321533044896, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9146518707275392, \"Time in s\": 467.220958 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996440195280716, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9280223846435548, \"Time in s\": 497.92269099999993 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996551441005008, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9402217864990234, \"Time in s\": 529.6450219999999 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996337462976528, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.022481918334961, \"Time in s\": 562.409421 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996445186318604, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.0225505828857422, \"Time in s\": 596.2119009999999 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996546753948712, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.0225963592529297, \"Time in s\": 631.0619619999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996642678850336, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.036111831665039, \"Time in s\": 666.9514569999999 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99967334185485, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532817840576172, \"Time in s\": 703.8889889999999 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819382407036, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053213119506836, \"Time in s\": 741.862274 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996900937803168, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532588958740234, \"Time in s\": 780.872663 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996978415375924, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0533504486083984, \"Time in s\": 820.917591 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052113506448, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053327560424805, \"Time in s\": 862.00148 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997122302158272, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532817840576172, \"Time in s\": 904.126635 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189226181749, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053327560424805, \"Time in s\": 947.288015 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253108167824, \"F1\": 0.3783783783783784, 
\"Memory in Mb\": 1.053937911987305, \"Time in s\": 991.476835 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997314150921364, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0626277923583984, \"Time in s\": 1036.7058969999998 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372539611822, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.062604904174805, \"Time in s\": 1082.9660009999998 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731663685152, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0989017486572266, \"Time in s\": 1130.2730419999998 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372540862464, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0989704132080078, \"Time in s\": 1178.6145779999997 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426163052572, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0987415313720703, \"Time in s\": 1227.9921859999995 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477640332532, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0987186431884766, \"Time in s\": 1278.4102429999998 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5523809523809524, \"F1\": 0.5252525252525252, \"Memory in Mb\": 0.1770515441894531, \"Time in s\": 0.137989 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5829383886255924, \"F1\": 0.5555555555555555, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 0.392857 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6025236593059937, \"F1\": 0.5827814569536425, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 0.757641 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6099290780141844, \"F1\": 0.5758354755784061, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 1.237112 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5841209829867675, \"F1\": 0.5089285714285714, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 1.823779 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748031496062992, \"F1\": 0.4981412639405205, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 2.523282 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.582995951417004, \"F1\": 0.4892561983471074, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 3.326693 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5749704840613932, \"F1\": 0.4812680115273775, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 4.242125 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5760755508919203, 
\"F1\": 0.482051282051282, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 5.269066 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5873465533522191, \"F1\": 0.4828402366863905, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 6.408509 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5931330472103005, \"F1\": 0.4925053533190577, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 7.660058999999999 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5979543666404405, \"F1\": 0.5034013605442177, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 9.024008 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6005809731299927, \"F1\": 0.4990892531876139, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 10.499633 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089008766014835, \"F1\": 0.5117845117845117, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 12.08531 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6091881686595343, \"F1\": 0.5121759622937941, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 13.781775 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6135693215339233, \"F1\": 0.5194424064563462, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 15.591862 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6185452526374237, \"F1\": 0.5354969574036511, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 17.520521 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6208704771893025, \"F1\": 0.5467084639498432, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 19.574794 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.620963735717834, \"F1\": 0.5561372891215823, \"Memory in Mb\": 0.1772804260253906, \"Time in s\": 21.756325 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6252949504483247, \"F1\": 0.56941431670282, \"Memory in Mb\": 0.1772804260253906, \"Time in s\": 24.064421 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6242696629213483, \"F1\": 0.5721596724667348, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 26.498977 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6229086229086229, \"F1\": 0.5763855421686748, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 29.055623 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.62330734509643, \"F1\": 0.5796703296703297, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 31.73345 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6244593000393236, \"F1\": 0.5860424794104898, 
\"Memory in Mb\": 0.1773033142089843, \"Time in s\": 34.533392 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6266515666289165, \"F1\": 0.591828312009905, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 37.454912 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6250453720508167, \"F1\": 0.5921831819976313, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 40.501518 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6249563089828731, \"F1\": 0.5927893738140417, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 43.671228 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6248736097067745, \"F1\": 0.5924569754668619, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 46.964135 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6260982753010088, \"F1\": 0.5958494548012664, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 50.377785 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.62378106322743, \"F1\": 0.5934738273283481, \"Memory in Mb\": 0.1645193099975586, \"Time in s\": 53.91364 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6246575342465753, \"F1\": 0.5937397034596376, \"Memory in Mb\": 0.2085180282592773, \"Time in s\": 57.57642799999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6234149218519611, \"F1\": 0.5931825422108953, \"Memory in Mb\": 0.2435979843139648, \"Time in s\": 61.313442 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6211038032599371, \"F1\": 0.5894019212891229, \"Memory in Mb\": 0.2788152694702148, \"Time in s\": 65.12271799999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6194837635303914, \"F1\": 0.5866747060596926, \"Memory in Mb\": 0.3256673812866211, \"Time in s\": 69.00323599999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6238878403882449, \"F1\": 0.5915080527086384, \"Memory in Mb\": 0.3337392807006836, \"Time in s\": 72.95431899999998 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6277850589777195, \"F1\": 0.5970488081725313, \"Memory in Mb\": 0.3397665023803711, \"Time in s\": 76.97715599999998 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6322366743177761, \"F1\": 0.6009961261759823, \"Memory in Mb\": 0.3677358627319336, \"Time in s\": 81.06970999999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6354606406754407, \"F1\": 0.6034575904916262, \"Memory in Mb\": 0.3820409774780273, \"Time in s\": 85.23008099999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6399709654004355, 
\"F1\": 0.6073878627968339, \"Memory in Mb\": 0.3902044296264648, \"Time in s\": 89.45861099999999 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.644963434772352, \"F1\": 0.6130110568269478, \"Memory in Mb\": 0.3902273178100586, \"Time in s\": 93.754136 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6508630609896433, \"F1\": 0.6185567010309279, \"Memory in Mb\": 0.3902502059936523, \"Time in s\": 98.118583 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6535609975286453, \"F1\": 0.620384047267356, \"Memory in Mb\": 0.3902044296264648, \"Time in s\": 102.55151499999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6570111915734036, \"F1\": 0.6243691420331651, \"Memory in Mb\": 0.3903875350952148, \"Time in s\": 107.05130299999998 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6607334334119666, \"F1\": 0.6288127639605818, \"Memory in Mb\": 0.3904333114624023, \"Time in s\": 111.61866799999996 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6630320821975257, \"F1\": 0.6303197607545433, \"Memory in Mb\": 0.4466238021850586, \"Time in s\": 116.25797499999996 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6670769230769231, \"F1\": 0.6330544879041374, \"Memory in Mb\": 0.4547872543334961, \"Time in s\": 120.96504799999995 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6707488456133307, \"F1\": 0.6378091872791519, \"Memory in Mb\": 0.4610433578491211, \"Time in s\": 125.74414799999995 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6734814232356988, \"F1\": 0.6407094959982694, \"Memory in Mb\": 0.4671621322631836, \"Time in s\": 130.59500899999998 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.674369343346813, \"F1\": 0.6412051771695311, \"Memory in Mb\": 0.4671392440795898, \"Time in s\": 135.51973499999997 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6778637478769579, \"F1\": 0.64504054897068, \"Memory in Mb\": 0.4684514999389648, \"Time in s\": 140.51779399999998 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9337016574585636, \"F1\": 0.933184855233853, \"Memory in Mb\": 1.4600162506103516, \"Time in s\": 2.214325 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.946990612921038, \"F1\": 0.9351351351351352, \"Memory in Mb\": 2.0955753326416016, \"Time in s\": 6.240333 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9370629370629372, \"F1\": 0.9227990970654628, \"Memory in Mb\": 2.479440689086914, \"Time in s\": 11.722802 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Elec2\", \"Accuracy\": 0.9343085840463704, \"F1\": 0.9197031039136304, \"Memory in Mb\": 2.829832077026367, \"Time in s\": 18.401477 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9322146169132258, \"F1\": 0.9140778057654632, \"Memory in Mb\": 3.4876842498779297, \"Time in s\": 26.240317 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9262189512419504, \"F1\": 0.9061988304093568, \"Memory in Mb\": 3.730012893676758, \"Time in s\": 35.261976000000004 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.92524838353572, \"F1\": 0.9056904098686828, \"Memory in Mb\": 4.267904281616211, \"Time in s\": 45.380796 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.922174692976404, \"F1\": 0.9017079121645174, \"Memory in Mb\": 4.513330459594727, \"Time in s\": 56.606028 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9222372132957194, \"F1\": 0.906351550960118, \"Memory in Mb\": 4.528413772583008, \"Time in s\": 68.87909400000001 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9222872281708798, \"F1\": 0.9083810515356586, \"Memory in Mb\": 4.871156692504883, \"Time in s\": 82.16100100000001 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9173105870546914, \"F1\": 0.904252846851034, \"Memory in Mb\": 4.923883438110352, \"Time in s\": 96.59034200000002 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9180388188759084, \"F1\": 0.9064370471490076, \"Memory in Mb\": 5.256982803344727, \"Time in s\": 112.03179200000002 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.913645240723444, \"F1\": 0.9013100436681224, \"Memory in Mb\": 5.652528762817383, \"Time in s\": 128.76319300000003 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9114562800599229, \"F1\": 0.8990743237170845, \"Memory in Mb\": 6.184247970581055, \"Time in s\": 146.63877800000003 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.911693281330488, \"F1\": 0.9004810084591143, \"Memory in Mb\": 6.168107986450195, \"Time in s\": 165.54320300000003 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.90845119006554, \"F1\": 0.8974893781382773, \"Memory in Mb\": 6.370748519897461, \"Time in s\": 185.57117500000004 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9089669501980392, \"F1\": 0.8977090325404932, \"Memory in Mb\": 6.689512252807617, \"Time in s\": 206.60419600000003 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.907033789170295, \"F1\": 0.8952315134761576, \"Memory in Mb\": 7.013689041137695, \"Time in s\": 228.760607 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.9056527043513624, \"F1\": 0.8922362309223624, \"Memory in Mb\": 7.149255752563477, \"Time in s\": 252.05960700000003 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9003256250344942, \"F1\": 0.8855368234250223, \"Memory in Mb\": 7.591207504272461, \"Time in s\": 276.72553400000004 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.901287779237845, \"F1\": 0.8854738382729601, \"Memory in Mb\": 7.899053573608398, \"Time in s\": 302.410993 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9013596909337214, \"F1\": 0.8864633864633865, \"Memory in Mb\": 8.218050003051758, \"Time in s\": 329.267044 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8999376109804674, \"F1\": 0.8850669753596825, \"Memory in Mb\": 8.369176864624023, \"Time in s\": 357.490064 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8988640022076071, \"F1\": 0.8821227552934869, \"Memory in Mb\": 8.549039840698242, \"Time in s\": 386.978862 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967283323767054, \"F1\": 0.8787517495205017, \"Memory in Mb\": 8.670183181762695, \"Time in s\": 417.760723 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8951814901294842, \"F1\": 0.8767287433221829, \"Memory in Mb\": 8.96574592590332, \"Time in s\": 449.897837 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8929724868157475, \"F1\": 0.8734287371881647, \"Memory in Mb\": 9.238008499145508, \"Time in s\": 483.328506 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8902905349469784, \"F1\": 0.8701535016096672, \"Memory in Mb\": 9.398244857788086, \"Time in s\": 518.131339 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8890115327522552, \"F1\": 0.8681497558328811, \"Memory in Mb\": 9.500497817993164, \"Time in s\": 554.2859920000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8889215938776261, \"F1\": 0.8685734186583084, \"Memory in Mb\": 9.676286697387695, \"Time in s\": 591.703834 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.886629873598006, \"F1\": 0.8655291832080412, \"Memory in Mb\": 9.93019676208496, \"Time in s\": 630.490308 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8864130247318133, \"F1\": 0.8647916238965305, \"Memory in Mb\": 10.322111129760742, \"Time in s\": 670.5577900000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.885774492423989, \"F1\": 0.8637977106848004, \"Memory in Mb\": 10.777273178100586, \"Time in s\": 711.8606440000001 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.885498165763075, \"F1\": 0.8626931911083429, \"Memory in Mb\": 10.977670669555664, \"Time in s\": 754.3481340000001 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8835661799489104, \"F1\": 0.8603736479842674, \"Memory in Mb\": 11.286626815795898, \"Time in s\": 798.241067 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882906638049977, \"F1\": 0.8599097611973149, \"Memory in Mb\": 11.60590934753418, \"Time in s\": 843.3988 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826109006294561, \"F1\": 0.8598995976786413, \"Memory in Mb\": 11.809194564819336, \"Time in s\": 889.816371 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8817788363784239, \"F1\": 0.8588765603328711, \"Memory in Mb\": 11.96577262878418, \"Time in s\": 937.652835 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8808762346814593, \"F1\": 0.8571816361847239, \"Memory in Mb\": 12.22038459777832, \"Time in s\": 986.872877 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797980076712933, \"F1\": 0.8550029958058717, \"Memory in Mb\": 12.516416549682615, \"Time in s\": 1037.336075 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8785839278503164, \"F1\": 0.8529891127192124, \"Memory in Mb\": 12.637868881225586, \"Time in s\": 1089.169624 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8770071745814827, \"F1\": 0.8506891271056662, \"Memory in Mb\": 13.111181259155272, \"Time in s\": 1142.33469 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8756064378673922, \"F1\": 0.8493440278554995, \"Memory in Mb\": 13.190984725952148, \"Time in s\": 1196.840346 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8754233248877406, \"F1\": 0.8501327860936744, \"Memory in Mb\": 13.482259750366213, \"Time in s\": 1252.532055 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8749049522921828, \"F1\": 0.8505713448578962, \"Memory in Mb\": 13.769769668579102, \"Time in s\": 1309.427191 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8752729454109178, \"F1\": 0.8519510111079466, \"Memory in Mb\": 13.892538070678713, \"Time in s\": 1367.440063 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8754139170052371, \"F1\": 0.8523148019264498, \"Memory in Mb\": 14.423887252807615, \"Time in s\": 1426.636647 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8745142226412491, \"F1\": 0.8513605534824177, \"Memory in Mb\": 14.559076309204102, \"Time in s\": 1487.084751 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Elec2\", \"Accuracy\": 0.8743270335413241, \"F1\": 0.8507211088218767, \"Memory in Mb\": 14.714178085327148, \"Time in s\": 1548.853591 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8751186560409722, \"F1\": 0.851922623877706, \"Memory in Mb\": 14.867197036743164, \"Time in s\": 1611.802241 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7142857142857143, \"Memory in Mb\": 0.6709518432617188, \"Time in s\": 0.096996 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7551020408163265, \"F1\": 0.7391304347826088, \"Memory in Mb\": 0.671112060546875, \"Time in s\": 0.227732 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7972972972972973, \"F1\": 0.7945205479452055, \"Memory in Mb\": 0.6711349487304688, \"Time in s\": 0.390548 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.6711578369140625, \"Time in s\": 0.5919840000000001 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8064516129032258, \"F1\": 0.8000000000000002, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 0.8358070000000001 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8211920529801323, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 1.118507 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8313253012048192, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 1.4342450000000002 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.8253968253968254, \"Memory in Mb\": 0.7092657089233398, \"Time in s\": 1.803702 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8173076923076923, \"Memory in Mb\": 0.7094945907592773, \"Time in s\": 2.2161530000000003 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8273092369477911, \"F1\": 0.8154506437768241, \"Memory in Mb\": 0.7095174789428711, \"Time in s\": 2.6652750000000003 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8188976377952757, \"Memory in Mb\": 0.7095861434936523, \"Time in s\": 3.1578190000000004 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.823529411764706, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 3.688734 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8263888888888888, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 4.255895000000001 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 4.8554520000000005 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8260869565217391, \"Memory in Mb\": 0.7095823287963867, \"Time in s\": 5.4957590000000005 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.849624060150376, \"F1\": 0.8235294117647061, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 6.1750370000000006 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8271954674220963, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 6.8921660000000005 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8530066815144766, \"F1\": 0.8225806451612903, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 7.648219 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523206751054853, \"F1\": 0.8241206030150755, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 8.452871 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8557114228456913, \"F1\": 0.8317757009345793, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 9.291999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8530534351145038, \"F1\": 0.8253968253968255, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 10.171949 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.832618025751073, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 11.089958 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8588850174216028, \"F1\": 0.8336755646817249, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 12.050434 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8631051752921536, \"F1\": 0.8360000000000001, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 13.044778 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8621794871794872, \"F1\": 0.83203125, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 14.08632 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8659476117103235, \"F1\": 0.8391866913123845, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 15.163796 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679525222551929, \"F1\": 0.8446771378708552, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 16.281047 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8726752503576538, \"F1\": 0.848381601362862, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 17.426965000000003 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8756906077348067, \"F1\": 0.8543689320388349, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 18.607993000000004 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.87716955941255, \"F1\": 0.8566978193146417, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 19.829159000000004 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785529715762274, \"F1\": 0.8575757575757577, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 21.096679000000005 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785982478097623, \"F1\": 0.8592162554426704, \"Memory in Mb\": 0.7489309310913086, \"Time in s\": 22.41538000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798543689320388, \"F1\": 0.8619246861924686, \"Memory in Mb\": 0.7852392196655273, \"Time in s\": 23.779028000000007 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8614130434782608, \"Memory in Mb\": 0.7852849960327148, \"Time in s\": 25.18231100000001 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8787185354691075, \"F1\": 0.8594164456233422, \"Memory in Mb\": 0.7853536605834961, \"Time in s\": 26.619257000000005 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8787541713014461, \"F1\": 0.8589909443725743, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 28.105865000000005 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8809523809523809, \"F1\": 0.8628428927680798, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 29.627597000000005 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798735511064278, \"F1\": 0.8629807692307693, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 31.194797000000005 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819301848049281, \"F1\": 0.8651817116060961, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 32.795573000000005 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828828828828829, \"F1\": 0.8662857142857143, \"Memory in Mb\": 0.7870550155639648, \"Time in s\": 34.450223 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828125, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.8609609603881836, \"Time in s\": 36.14349 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8846520495710201, \"F1\": 0.8691891891891892, \"Memory in Mb\": 0.8610067367553711, \"Time in s\": 37.870798 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8836126629422719, \"F1\": 0.8691099476439791, \"Memory in Mb\": 0.8625936508178711, \"Time in s\": 39.648817 }, { 
\"step\": 1100, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844404003639672, \"F1\": 0.8702757916241062, \"Memory in Mb\": 0.8627080917358398, \"Time in s\": 41.461037000000005 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861209964412812, \"F1\": 0.8732673267326733, \"Memory in Mb\": 0.8989248275756836, \"Time in s\": 43.309543000000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8842471714534378, \"F1\": 0.8707482993197277, \"Memory in Mb\": 0.8989477157592773, \"Time in s\": 45.19996400000001 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816013628620102, \"F1\": 0.8677450047573739, \"Memory in Mb\": 0.8990621566772461, \"Time in s\": 47.14264000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798999165971643, \"F1\": 0.8654205607476635, \"Memory in Mb\": 0.8990621566772461, \"Time in s\": 49.13049000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880718954248366, \"F1\": 0.8660550458715598, \"Memory in Mb\": 0.8990850448608398, \"Time in s\": 51.16391500000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8783026421136909, \"F1\": 0.8635547576301617, \"Memory in Mb\": 0.8991079330444336, \"Time in s\": 53.25500000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564722061157226, \"Time in s\": 0.853094 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565408706665039, \"Time in s\": 2.059998 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564950942993164, \"Time in s\": 3.616244 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565179824829101, \"Time in s\": 5.5300910000000005 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565179824829101, \"Time in s\": 7.803705000000001 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564950942993164, \"Time in s\": 10.437897 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565637588500976, \"Time in s\": 13.429757000000002 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996715496288512, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.3805198669433594, \"Time in s\": 17.782309 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997080462454748, \"F1\": 0.8, \"Memory in Mb\": 
0.3727455139160156, \"Time in s\": 22.84111 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372431551842, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 28.521633 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997611312822472, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 34.826752 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997810378804468, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 41.750952 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978818012774, \"F1\": 0.8, \"Memory in Mb\": 0.3649024963378906, \"Time in s\": 49.282837 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123193573816, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 57.542832 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248318385652, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 66.418254 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998357802082308, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 75.902957 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998454404945905, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.425140380859375, \"Time in s\": 86.000011 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998540273844628, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4250946044921875, \"Time in s\": 96.707721 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999861710366191, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4250030517578125, \"Time in s\": 108.022851 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686250295594, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 119.949225 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998748811370802, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 132.490409 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998805684939688, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 145.647858 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998857612867847, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4172286987304687, \"Time in s\": 159.41857399999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998905213373912, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4172286987304687, \"Time in s\": 173.80738499999998 }, { \"step\": 47575, \"track\": \"Binary 
classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738806911338, \"F1\": 0.7857142857142857, \"Memory in Mb\": 0.4640121459960937, \"Time in s\": 189.004291 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787315318228, \"F1\": 0.7857142857142857, \"Memory in Mb\": 0.4561920166015625, \"Time in s\": 204.850148 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998637602179836, \"F1\": 0.787878787878788, \"Memory in Mb\": 0.5220794677734375, \"Time in s\": 221.427526 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686260158024, \"F1\": 0.787878787878788, \"Memory in Mb\": 0.5220794677734375, \"Time in s\": 238.605883 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998550356974596, \"F1\": 0.7647058823529411, \"Memory in Mb\": 0.56781005859375, \"Time in s\": 256.499342 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995445707579392, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.0016555786132812, \"Time in s\": 277.003919 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995592622728504, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.056976318359375, \"Time in s\": 298.475709 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999573035553001, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.0613327026367188, \"Time in s\": 320.766705 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995382018535622, \"F1\": 0.5538461538461538, \"Memory in Mb\": 1.28204345703125, \"Time in s\": 344.04407 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995517843619108, \"F1\": 0.5538461538461538, \"Memory in Mb\": 1.2822036743164062, \"Time in s\": 368.015979 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549576602006, \"F1\": 0.5454545454545455, \"Memory in Mb\": 1.2944259643554688, \"Time in s\": 392.68268599999993 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562088545696, \"F1\": 0.5714285714285714, \"Memory in Mb\": 1.2944488525390625, \"Time in s\": 418.04215199999993 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995739241585002, \"F1\": 0.5714285714285714, \"Memory in Mb\": 1.2945404052734375, \"Time in s\": 444.072429 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999571308063557, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.2945404052734375, \"Time in s\": 470.781043 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995823003126012, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.294586181640625, \"Time in s\": 498.253604 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995927429419724, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.2867202758789062, \"Time in s\": 526.392846 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995898592704622, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2990570068359375, \"Time in s\": 555.302544 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995996246481076, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2990341186523438, \"Time in s\": 584.881396 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996089358165908, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991256713867188, \"Time in s\": 615.117937 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996178237450885, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991943359375, \"Time in s\": 646.0958009999999 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996263166499288, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991714477539062, \"Time in s\": 677.7414719999999 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996344402938186, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991714477539062, \"Time in s\": 710.050307 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996086762075134, \"F1\": 0.5333333333333333, \"Memory in Mb\": 1.4483489990234375, \"Time in s\": 743.2331419999999 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999616828875776, \"F1\": 0.5333333333333333, \"Memory in Mb\": 1.4483261108398438, \"Time in s\": 777.273881 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996139244578855, \"F1\": 0.5263157894736841, \"Memory in Mb\": 1.4622306823730469, \"Time in s\": 812.316683 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996216460498796, \"F1\": 0.5263157894736841, \"Memory in Mb\": 1.4664268493652344, \"Time in s\": 848.066126 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4857142857142857, \"F1\": 0.4599999999999999, \"Memory in Mb\": 0.2364511489868164, \"Time in s\": 0.160512 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165876777251185, \"F1\": 0.4574468085106383, \"Memory in Mb\": 0.2372446060180664, \"Time in s\": 0.465512 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205047318611987, \"F1\": 0.4722222222222222, \"Memory in Mb\": 0.2378625869750976, \"Time in s\": 0.912656 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.4838709677419355, \"Memory in Mb\": 0.2379693984985351, \"Time in s\": 1.508959 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.55765595463138, \"F1\": 0.455813953488372, \"Memory in Mb\": 0.2379922866821289, \"Time in s\": 2.237684 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.4259634888438134, \"Memory in Mb\": 0.2384653091430664, \"Time in s\": 3.113606 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748987854251012, \"F1\": 0.4220183486238532, \"Memory in Mb\": 0.2386941909790039, \"Time in s\": 4.124442 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5785123966942148, \"F1\": 0.4232633279483037, \"Memory in Mb\": 0.2386255264282226, \"Time in s\": 5.284904 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.2386026382446289, \"Time in s\": 6.590759 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4146341463414634, \"Memory in Mb\": 0.2383584976196289, \"Time in s\": 8.045558 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.4015056461731493, \"Memory in Mb\": 0.2384576797485351, \"Time in s\": 9.640389 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4101382488479262, \"Memory in Mb\": 0.2387628555297851, \"Time in s\": 11.37949 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.3973799126637554, \"Memory in Mb\": 0.2390680313110351, \"Time in s\": 13.26876 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.3926380368098159, \"Memory in Mb\": 0.2390222549438476, \"Time in s\": 15.320885 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.3896353166986563, \"Memory in Mb\": 0.2389993667602539, \"Time in s\": 17.530882 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, \"F1\": 0.3876811594202898, \"Memory in Mb\": 0.2390604019165039, \"Time in s\": 19.899158 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6013325930038868, \"F1\": 0.3904923599320882, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 22.423363 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.396812749003984, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 25.102833 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.3961136023916292, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 27.945051 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5969797074091553, \"F1\": 
0.3994374120956399, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 30.943527 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597752808988764, \"F1\": 0.4013377926421405, \"Memory in Mb\": 0.2390375137329101, \"Time in s\": 34.101409999999994 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5988845988845989, \"F1\": 0.4033184428844926, \"Memory in Mb\": 0.2390985488891601, \"Time in s\": 37.41135799999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5995075913007797, \"F1\": 0.4019607843137255, \"Memory in Mb\": 0.2391214370727539, \"Time in s\": 40.87347599999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008651199370821, \"F1\": 0.4088526499708794, \"Memory in Mb\": 0.2394876480102539, \"Time in s\": 44.48936199999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002265005662514, \"F1\": 0.4073866815892558, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 48.25778699999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4028077753779697, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 52.17665199999999 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599790283117791, \"F1\": 0.4051948051948052, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 56.248781 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599932591843613, \"F1\": 0.4026170105686965, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 60.47524299999999 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5977871786527823, \"F1\": 0.4023210831721469, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 64.856117 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986159169550173, \"F1\": 0.4042950513538749, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 69.39380299999999 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5981735159817352, \"F1\": 0.4021739130434782, \"Memory in Mb\": 0.23760986328125, \"Time in s\": 74.079456 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5959893836626364, \"F1\": 0.4022687609075043, \"Memory in Mb\": 0.3125686645507812, \"Time in s\": 78.938547 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597369173577352, \"F1\": 0.4023769100169779, \"Memory in Mb\": 0.3672904968261719, \"Time in s\": 83.975893 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008881487649181, \"F1\": 0.4087171052631579, \"Memory in Mb\": 0.3972740173339844, \"Time in s\": 89.206915 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.6012402264761392, \"F1\": 0.4086365453818472, \"Memory in Mb\": 0.4523735046386719, \"Time in s\": 94.644851 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6023591087811271, \"F1\": 0.4104158569762923, \"Memory in Mb\": 0.4865531921386719, \"Time in s\": 100.28768 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6052027543993879, \"F1\": 0.4145234493192133, \"Memory in Mb\": 0.5345191955566406, \"Time in s\": 106.160683 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608393344921778, \"F1\": 0.4195804195804196, \"Memory in Mb\": 0.5653800964355469, \"Time in s\": 112.191076 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6121461408178079, \"F1\": 0.4260651629072682, \"Memory in Mb\": 0.5808448791503906, \"Time in s\": 118.35405 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157112526539278, \"F1\": 0.4329968673860076, \"Memory in Mb\": 0.5852127075195312, \"Time in s\": 124.637983 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6193325661680092, \"F1\": 0.438560760353021, \"Memory in Mb\": 0.6001129150390625, \"Time in s\": 131.04469 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6218827229835991, \"F1\": 0.4421610871726881, \"Memory in Mb\": 0.6065597534179688, \"Time in s\": 137.573932 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6219003730524468, \"F1\": 0.4429356611703847, \"Memory in Mb\": 0.6456527709960938, \"Time in s\": 144.226405 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.623203945957538, \"F1\": 0.4455664247396655, \"Memory in Mb\": 0.6509552001953125, \"Time in s\": 151.005134 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6250786328370728, \"F1\": 0.446096654275093, \"Memory in Mb\": 0.7009811401367188, \"Time in s\": 157.911799 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6266666666666667, \"F1\": 0.4468085106382978, \"Memory in Mb\": 0.7141494750976562, \"Time in s\": 164.949721 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.629592451314997, \"F1\": 0.4530091906314853, \"Memory in Mb\": 0.73150634765625, \"Time in s\": 172.116039 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6298407705917043, \"F1\": 0.4527753560011624, \"Memory in Mb\": 0.7153549194335938, \"Time in s\": 179.401331 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6321971885230118, \"F1\": 0.456459874786568, \"Memory in Mb\": 0.7158050537109375, \"Time in s\": 186.806087 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6340819022457067, \"F1\": 
0.4594368553108447, \"Memory in Mb\": 0.7224349975585938, \"Time in s\": 194.332292 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8629834254143647, \"F1\": 0.8663793103448276, \"Memory in Mb\": 1.7905378341674805, \"Time in s\": 2.379335 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8884594146880177, \"F1\": 0.8674540682414698, \"Memory in Mb\": 2.571917533874512, \"Time in s\": 6.1243490000000005 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8759661391240339, \"F1\": 0.8536691272253583, \"Memory in Mb\": 2.0127248764038086, \"Time in s\": 11.743707 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8843499861992824, \"F1\": 0.8625778943916038, \"Memory in Mb\": 2.5987844467163086, \"Time in s\": 18.955423 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8869507617575624, \"F1\": 0.8584070796460177, \"Memory in Mb\": 3.065375328063965, \"Time in s\": 27.785827 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833486660533578, \"F1\": 0.8529684601113172, \"Memory in Mb\": 2.4308347702026367, \"Time in s\": 38.240294 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882983756505283, \"F1\": 0.8526023043305523, \"Memory in Mb\": 2.7551145553588867, \"Time in s\": 50.337362 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842279563957499, \"F1\": 0.8536032106089687, \"Memory in Mb\": 2.720797538757324, \"Time in s\": 63.955505 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842143996075065, \"F1\": 0.8612172890326375, \"Memory in Mb\": 2.593207359313965, \"Time in s\": 79.221878 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8847554917761342, \"F1\": 0.8655678599021375, \"Memory in Mb\": 2.3350706100463867, \"Time in s\": 95.984043 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88509784244857, \"F1\": 0.8683454064619983, \"Memory in Mb\": 2.604697227478028, \"Time in s\": 114.346975 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8848312022812989, \"F1\": 0.8691745036572621, \"Memory in Mb\": 2.73319149017334, \"Time in s\": 134.274448 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88222807166511, \"F1\": 0.8655877507510417, \"Memory in Mb\": 2.982542991638184, \"Time in s\": 155.860777 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.878498777891666, \"F1\": 0.8616572403267798, \"Memory in Mb\": 2.5146703720092773, \"Time in s\": 179.01547000000002 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8796820958127898, \"F1\": 0.8645738424583782, \"Memory in Mb\": 2.628962516784668, \"Time 
in s\": 203.671512 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797516384960331, \"F1\": 0.8652701553683234, \"Memory in Mb\": 2.473284721374512, \"Time in s\": 229.977136 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8800077917018375, \"F1\": 0.8645558487247141, \"Memory in Mb\": 2.1737966537475586, \"Time in s\": 257.791658 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8778438707303612, \"F1\": 0.861262014208107, \"Memory in Mb\": 2.5011510848999023, \"Time in s\": 287.339735 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8773601347818509, \"F1\": 0.8583506676508086, \"Memory in Mb\": 2.443587303161621, \"Time in s\": 318.628337 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8774214912522766, \"F1\": 0.8581827469510249, \"Memory in Mb\": 2.8467397689819336, \"Time in s\": 351.823445 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8781603153745072, \"F1\": 0.8573362875430822, \"Memory in Mb\": 2.840205192565918, \"Time in s\": 386.938658 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8755707189804827, \"F1\": 0.8552923328276345, \"Memory in Mb\": 2.7734594345092773, \"Time in s\": 424.419923 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8726784086000864, \"F1\": 0.8517959890508909, \"Memory in Mb\": 3.382327079772949, \"Time in s\": 464.42461 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8735225129926873, \"F1\": 0.8505921981962403, \"Memory in Mb\": 2.277277946472168, \"Time in s\": 506.250356 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8713850501125877, \"F1\": 0.8461741564133707, \"Memory in Mb\": 2.6045217514038086, \"Time in s\": 549.67693 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8655062619401401, \"F1\": 0.8378212347701444, \"Memory in Mb\": 2.0687971115112305, \"Time in s\": 595.047504 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8629246555741793, \"F1\": 0.8334740501614105, \"Memory in Mb\": 1.965815544128418, \"Time in s\": 642.194018 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8587929199353491, \"F1\": 0.8286944045911048, \"Memory in Mb\": 1.5182180404663086, \"Time in s\": 691.397121 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8580672172953222, \"F1\": 0.8274010645683869, \"Memory in Mb\": 1.035365104675293, \"Time in s\": 742.026028 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8578682070716361, \"F1\": 0.8276446705037256, \"Memory in Mb\": 1.274672508239746, \"Time in s\": 793.972348 }, { \"step\": 28086, \"track\": 
\"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8549403596225743, \"F1\": 0.8230542043085476, \"Memory in Mb\": 1.6423864364624023, \"Time in s\": 847.62915 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8532648063192025, \"F1\": 0.8196846388606307, \"Memory in Mb\": 2.145480155944824, \"Time in s\": 903.499383 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8507208081078369, \"F1\": 0.8162391402808086, \"Memory in Mb\": 1.650496482849121, \"Time in s\": 961.52369 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8489108203746388, \"F1\": 0.812746439204957, \"Memory in Mb\": 1.6065664291381836, \"Time in s\": 1021.4058 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8464789176574474, \"F1\": 0.8088731841382018, \"Memory in Mb\": 1.891444206237793, \"Time in s\": 1083.110109 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443660892227502, \"F1\": 0.8061855670103092, \"Memory in Mb\": 1.5838193893432615, \"Time in s\": 1146.603723 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8444258822827482, \"F1\": 0.8069876753395758, \"Memory in Mb\": 2.1811208724975586, \"Time in s\": 1211.933286 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422168646701716, \"F1\": 0.8034590057167668, \"Memory in Mb\": 2.362921714782715, \"Time in s\": 1279.642459 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8416777516769026, \"F1\": 0.8019121813031161, \"Memory in Mb\": 2.295699119567871, \"Time in s\": 1349.6385269999998 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8424625403570739, \"F1\": 0.8013915463558879, \"Memory in Mb\": 2.210890769958496, \"Time in s\": 1421.6820649999995 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8423206353479606, \"F1\": 0.8005720317341414, \"Memory in Mb\": 1.5720243453979492, \"Time in s\": 1496.1115339999997 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8416335970145331, \"F1\": 0.7984750183934186, \"Memory in Mb\": 1.657557487487793, \"Time in s\": 1572.0838559999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8402854429242498, \"F1\": 0.7969321148825065, \"Memory in Mb\": 1.542536735534668, \"Time in s\": 1649.6649279999997 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404786393397387, \"F1\": 0.7989503303929938, \"Memory in Mb\": 1.814925193786621, \"Time in s\": 1728.6718509999996 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419141995143369, \"F1\": 0.8025852298832972, \"Memory in Mb\": 1.6994237899780271, \"Time in s\": 1809.157296 }, { \"step\": 41676, \"track\": 
\"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8428554289142172, \"F1\": 0.8053036834438267, \"Memory in Mb\": 2.179030418395996, \"Time in s\": 1890.982895 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8435217585308001, \"F1\": 0.8066061010652193, \"Memory in Mb\": 2.626959800720215, \"Time in s\": 1974.3648729999995 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8437004162163405, \"F1\": 0.8069418013463233, \"Memory in Mb\": 2.851019859313965, \"Time in s\": 2059.407496 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8424751650034915, \"F1\": 0.804298547561078, \"Memory in Mb\": 3.612540245056152, \"Time in s\": 2146.572713 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419391156537672, \"F1\": 0.8040932472365109, \"Memory in Mb\": 3.2033262252807617, \"Time in s\": 2236.8483089999995 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7083333333333334, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.7285165786743164, \"Time in s\": 0.081182 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.7291955947875977, \"Time in s\": 0.246708 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8493150684931507, \"Memory in Mb\": 0.7295160293579102, \"Time in s\": 0.489882 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8585858585858586, \"F1\": 0.8541666666666666, \"Memory in Mb\": 0.7297601699829102, \"Time in s\": 0.81137 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.85, \"Memory in Mb\": 0.7297601699829102, \"Time in s\": 1.210797 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523489932885906, \"F1\": 0.8533333333333335, \"Memory in Mb\": 0.7300043106079102, \"Time in s\": 1.687212 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8620689655172413, \"F1\": 0.8536585365853658, \"Memory in Mb\": 0.7303934097290039, \"Time in s\": 2.243398 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592964824120602, \"F1\": 0.8510638297872339, \"Memory in Mb\": 0.7305307388305664, \"Time in s\": 2.888269 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8405797101449276, \"Memory in Mb\": 0.77154541015625, \"Time in s\": 3.617105000000001 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8347826086956521, \"Memory in Mb\": 0.7995452880859375, \"Time in s\": 4.427161000000001 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": 
\"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8333333333333335, \"Memory in Mb\": 0.799774169921875, \"Time in s\": 5.319298000000001 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8528428093645485, \"F1\": 0.837037037037037, \"Memory in Mb\": 0.799957275390625, \"Time in s\": 6.288776 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8611111111111112, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.800323486328125, \"Time in s\": 7.336677 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653295128939829, \"F1\": 0.8438538205980067, \"Memory in Mb\": 0.8004684448242188, \"Time in s\": 8.465301 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8663101604278075, \"F1\": 0.8427672955974843, \"Memory in Mb\": 0.8407363891601562, \"Time in s\": 9.675054 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671679197994987, \"F1\": 0.8417910447761194, \"Memory in Mb\": 0.8817596435546875, \"Time in s\": 10.973937 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.839080459770115, \"Memory in Mb\": 0.937408447265625, \"Time in s\": 12.361793 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8708240534521158, \"F1\": 0.8406593406593408, \"Memory in Mb\": 0.9376602172851562, \"Time in s\": 13.826239 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.869198312236287, \"F1\": 0.8402061855670103, \"Memory in Mb\": 0.9379119873046876, \"Time in s\": 15.36192 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8413461538461539, \"Memory in Mb\": 0.9381179809570312, \"Time in s\": 16.969597999999998 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8683206106870229, \"F1\": 0.8384074941451991, \"Memory in Mb\": 0.9381790161132812, \"Time in s\": 18.650676 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670309653916212, \"F1\": 0.8381374722838136, \"Memory in Mb\": 0.9382858276367188, \"Time in s\": 20.413452 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867595818815331, \"F1\": 0.8382978723404255, \"Memory in Mb\": 0.9383468627929688, \"Time in s\": 22.261479999999995 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8697829716193656, \"F1\": 0.8381742738589212, \"Memory in Mb\": 0.9384689331054688, \"Time in s\": 24.204291999999995 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8717948717948718, \"F1\": 0.8373983739837398, \"Memory in Mb\": 0.9792633056640624, \"Time in s\": 26.239624 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Bagging\", 
\"dataset\": \"Phishing\", \"Accuracy\": 0.8767334360554699, \"F1\": 0.846153846153846, \"Memory in Mb\": 0.9797210693359376, \"Time in s\": 28.350959 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8753709198813057, \"F1\": 0.8478260869565216, \"Memory in Mb\": 1.0075225830078125, \"Time in s\": 30.546933 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798283261802575, \"F1\": 0.8515901060070671, \"Memory in Mb\": 0.9475822448730468, \"Time in s\": 32.825613999999995 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8576214405360134, \"Memory in Mb\": 1.0479621887207031, \"Time in s\": 35.192032999999995 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8865153538050734, \"F1\": 0.8631239935587761, \"Memory in Mb\": 1.0882606506347656, \"Time in s\": 37.652957 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875968992248062, \"F1\": 0.863849765258216, \"Memory in Mb\": 1.1435890197753906, \"Time in s\": 40.206814 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873591989987485, \"F1\": 0.8652694610778443, \"Memory in Mb\": 1.2515907287597656, \"Time in s\": 42.859537 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.8661870503597122, \"Memory in Mb\": 1.2530021667480469, \"Time in s\": 45.593545 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8671328671328671, \"Memory in Mb\": 1.266559600830078, \"Time in s\": 48.414666 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8688524590163934, \"Memory in Mb\": 1.294574737548828, \"Time in s\": 51.318085 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8670212765957446, \"Memory in Mb\": 1.3499031066894531, \"Time in s\": 54.308453 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8896103896103896, \"F1\": 0.8695652173913043, \"Memory in Mb\": 1.350116729736328, \"Time in s\": 57.385236 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893572181243414, \"F1\": 0.8708487084870848, \"Memory in Mb\": 1.3506507873535156, \"Time in s\": 60.544707 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901437371663244, \"F1\": 0.8718562874251498, \"Memory in Mb\": 1.3507575988769531, \"Time in s\": 63.787028 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878878878878879, \"F1\": 0.8697674418604652, \"Memory in Mb\": 1.3509178161621094, \"Time in s\": 67.116503 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8876953125, \"F1\": 0.8700564971751412, \"Memory in Mb\": 1.3512077331542969, \"Time in s\": 70.53228 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8894184938036225, \"F1\": 0.8725274725274725, \"Memory in Mb\": 1.3513069152832031, \"Time in s\": 74.046134 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901303538175046, \"F1\": 0.8742004264392325, \"Memory in Mb\": 1.3514289855957031, \"Time in s\": 77.61240799999999 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89171974522293, \"F1\": 0.8761706555671176, \"Memory in Mb\": 1.3517951965332031, \"Time in s\": 81.22848299999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932384341637011, \"F1\": 0.8790322580645162, \"Memory in Mb\": 1.3518562316894531, \"Time in s\": 84.88876699999999 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8938207136640557, \"F1\": 0.8794466403162056, \"Memory in Mb\": 1.3518562316894531, \"Time in s\": 88.60766799999999 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926746166950597, \"F1\": 0.877906976744186, \"Memory in Mb\": 1.3519172668457031, \"Time in s\": 92.36705899999998 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932443703085905, \"F1\": 0.8783269961977186, \"Memory in Mb\": 1.365093231201172, \"Time in s\": 96.17394699999998 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929738562091504, \"F1\": 0.8779123951537745, \"Memory in Mb\": 1.4202919006347656, \"Time in s\": 100.0275 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8935148118494796, \"F1\": 0.8792007266121706, \"Memory in Mb\": 1.4205055236816406, \"Time in s\": 103.925929 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.217813491821289, \"Time in s\": 1.823771 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.218423843383789, \"Time in s\": 5.582317 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189655303955078, \"Time in s\": 10.437191 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189884185791015, \"Time in s\": 16.255529 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189884185791015, \"Time in s\": 22.995497 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2195987701416015, \"Time in s\": 30.599323 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": 
\"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2196216583251953, \"Time in s\": 39.036327 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999343099257702, \"F1\": 0.1666666666666666, \"Memory in Mb\": 0.5055475234985352, \"Time in s\": 48.440921 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993109891392, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5140314102172852, \"Time in s\": 59.356571 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999369383572442, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5010766983032227, \"Time in s\": 71.78490500000001 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994267150773934, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5033426284790039, \"Time in s\": 85.737205 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474490913072, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5038537979125977, \"Time in s\": 101.221325 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149163230658, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5166254043579102, \"Time in s\": 118.224146 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995495664577156, \"F1\": 0.25, \"Memory in Mb\": 0.5370950698852539, \"Time in s\": 136.728077 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999579596412556, \"F1\": 0.25, \"Memory in Mb\": 0.5375986099243164, \"Time in s\": 156.75763500000002 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058724997536, \"F1\": 0.25, \"Memory in Mb\": 0.5378046035766602, \"Time in s\": 178.30216700000005 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999629057187017, \"F1\": 0.25, \"Memory in Mb\": 0.5502328872680664, \"Time in s\": 201.36650900000004 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496657227104, \"F1\": 0.25, \"Memory in Mb\": 0.5628290176391602, \"Time in s\": 225.93287100000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996681048788584, \"F1\": 0.25, \"Memory in Mb\": 0.5643777847290039, \"Time in s\": 252.01061500000003 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847000709423, \"F1\": 0.25, \"Memory in Mb\": 0.5652093887329102, \"Time in s\": 279.629747 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997147289926, \"F1\": 0.25, \"Memory in Mb\": 0.5653314590454102, \"Time in s\": 308.76820100000003 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133643855248, \"F1\": 0.25, \"Memory in Mb\": 0.5653314590454102, \"Time in s\": 339.41859300000004 
}, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997258270882836, \"F1\": 0.25, \"Memory in Mb\": 0.5653085708618164, \"Time in s\": 371.57202200000006 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372512097392, \"F1\": 0.25, \"Memory in Mb\": 0.5653696060180664, \"Time in s\": 405.2428510000001 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477613822676, \"F1\": 0.25, \"Memory in Mb\": 0.5838403701782227, \"Time in s\": 440.4367680000001 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574630636458, \"F1\": 0.25, \"Memory in Mb\": 0.5960397720336914, \"Time in s\": 477.1431370000001 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997469832619696, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6703958511352539, \"Time in s\": 515.395289 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756019743633, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6706399917602539, \"Time in s\": 555.1871910000001 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997644330083716, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6832361221313477, \"Time in s\": 596.5168480000001 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321533044896, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.1210947036743164, \"Time in s\": 639.6973030000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996440195280716, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.145817756652832, \"Time in s\": 684.8963280000002 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996551441005008, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.1608476638793943, \"Time in s\": 732.0726040000002 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996337462976528, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.2456941604614258, \"Time in s\": 781.2786070000002 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996445186318604, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.246922492980957, \"Time in s\": 832.4886640000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996546753948712, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.2487382888793943, \"Time in s\": 885.7141210000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996642678850336, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.266160011291504, \"Time in s\": 940.9452050000002 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99967334185485, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.296757698059082, \"Time in s\": 998.2257750000002 }, { \"step\": 72314, 
\"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819382407036, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.2987031936645508, \"Time in s\": 1057.5572380000003 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996900937803168, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.2991762161254885, \"Time in s\": 1118.9337870000004 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996978415375924, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3004274368286133, \"Time in s\": 1182.3536270000004 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052113506448, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3022356033325195, \"Time in s\": 1247.8338970000004 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997122302158272, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.302800178527832, \"Time in s\": 1315.3680150000005 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189226181749, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3035783767700195, \"Time in s\": 1384.9414330000004 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253108167824, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3040666580200195, \"Time in s\": 1456.5304290000004 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997314150921364, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3194093704223633, \"Time in s\": 1530.1954770000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372539611822, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3199357986450195, \"Time in s\": 1605.9064890000004 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731663685152, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3624582290649414, \"Time in s\": 1683.7024010000005 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372540862464, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3635034561157229, \"Time in s\": 1763.5593190000004 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426163052572, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3640680313110352, \"Time in s\": 1845.4940930000005 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477640332532, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3650827407836914, \"Time in s\": 1929.4936330000005 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5142857142857142, \"F1\": 0.4516129032258064, \"Memory in Mb\": 0.1929693222045898, \"Time in s\": 0.378114 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5402843601895735, \"F1\": 0.4756756756756757, \"Memory in Mb\": 
0.1935796737670898, \"Time in s\": 1.064058 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5394321766561514, \"F1\": 0.4930555555555555, \"Memory in Mb\": 0.1942129135131836, \"Time in s\": 1.944827 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5531914893617021, \"F1\": 0.4932975871313673, \"Memory in Mb\": 0.1941900253295898, \"Time in s\": 3.02079 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5614366729678639, \"F1\": 0.4703196347031963, \"Memory in Mb\": 0.1941900253295898, \"Time in s\": 4.293626 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5763779527559055, \"F1\": 0.4836852207293666, \"Memory in Mb\": 0.4277210235595703, \"Time in s\": 5.781232999999999 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5991902834008097, \"F1\": 0.4940374787052811, \"Memory in Mb\": 0.5387935638427734, \"Time in s\": 7.493556 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6210153482880756, \"F1\": 0.5201793721973094, \"Memory in Mb\": 0.6348705291748047, \"Time in s\": 9.45639 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6411332633788038, \"F1\": 0.5464190981432361, \"Memory in Mb\": 0.7018413543701172, \"Time in s\": 11.663967 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6515580736543909, \"F1\": 0.555956678700361, \"Memory in Mb\": 0.7448062896728516, \"Time in s\": 14.117771 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6626609442060086, \"F1\": 0.5732899022801302, \"Memory in Mb\": 0.8341083526611328, \"Time in s\": 16.817314 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6766325727773407, \"F1\": 0.5958702064896755, \"Memory in Mb\": 0.8756198883056641, \"Time in s\": 19.759644 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6877269426289034, \"F1\": 0.6062271062271062, \"Memory in Mb\": 0.961111068725586, \"Time in s\": 22.944053 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6999325691166555, \"F1\": 0.6238377007607777, \"Memory in Mb\": 1.0045452117919922, \"Time in s\": 26.370908 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7073631214600378, \"F1\": 0.6375681995323461, \"Memory in Mb\": 1.1097278594970703, \"Time in s\": 30.041027 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7162241887905605, \"F1\": 0.6496722505462491, \"Memory in Mb\": 1.175821304321289, \"Time in s\": 33.956331 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Leveraging 
Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7262631871182677, \"F1\": 0.6662153012863914, \"Memory in Mb\": 1.262613296508789, \"Time in s\": 38.117335 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7315154693235448, \"F1\": 0.6767676767676768, \"Memory in Mb\": 1.3344478607177734, \"Time in s\": 42.524713 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7386984600099354, \"F1\": 0.6894923258559622, \"Memory in Mb\": 1.391103744506836, \"Time in s\": 47.18564 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7451628126474752, \"F1\": 0.7013274336283186, \"Memory in Mb\": 1.475076675415039, \"Time in s\": 52.095729 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7501123595505618, \"F1\": 0.7073684210526315, \"Memory in Mb\": 1.496999740600586, \"Time in s\": 57.242367 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7550407550407551, \"F1\": 0.7143571785892947, \"Memory in Mb\": 1.516103744506836, \"Time in s\": 62.636095000000005 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7595404185473943, \"F1\": 0.7196172248803827, \"Memory in Mb\": 1.5630512237548828, \"Time in s\": 68.252973 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7620920173023987, \"F1\": 0.725124943207633, \"Memory in Mb\": 1.623823165893555, \"Time in s\": 74.089247 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7674594186485466, \"F1\": 0.7326388888888887, \"Memory in Mb\": 1.6640300750732422, \"Time in s\": 80.135514 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7727767695099819, \"F1\": 0.7391666666666666, \"Memory in Mb\": 1.7581462860107422, \"Time in s\": 86.389582 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.777001048584411, \"F1\": 0.7435691318327974, \"Memory in Mb\": 1.8173961639404297, \"Time in s\": 92.855234 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7809234917425009, \"F1\": 0.7474747474747475, \"Memory in Mb\": 1.919931411743164, \"Time in s\": 99.529297 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.784249918646274, \"F1\": 0.7521495327102804, \"Memory in Mb\": 2.025243759155273, \"Time in s\": 106.427437 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7889273356401384, \"F1\": 0.7567959405581733, \"Memory in Mb\": 2.059762954711914, \"Time in s\": 113.53746300000002 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7920852359208523, \"F1\": 0.7600983491394451, \"Memory in Mb\": 
2.1231555938720703, \"Time in s\": 120.860592 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7935712179298142, \"F1\": 0.7631935047361299, \"Memory in Mb\": 2.208223342895508, \"Time in s\": 128.39806900000002 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7963969116385473, \"F1\": 0.7653263019116677, \"Memory in Mb\": 2.2967967987060547, \"Time in s\": 136.14712200000002 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7993338884263114, \"F1\": 0.7677481529071635, \"Memory in Mb\": 2.342061996459961, \"Time in s\": 144.10985700000003 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8015637638177406, \"F1\": 0.7710018668326073, \"Memory in Mb\": 2.4194507598876958, \"Time in s\": 152.28979100000004 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8049803407601572, \"F1\": 0.7753623188405797, \"Memory in Mb\": 2.452432632446289, \"Time in s\": 160.68369700000002 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8066819688854884, \"F1\": 0.7769276044732195, \"Memory in Mb\": 2.484903335571289, \"Time in s\": 169.29472 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8080456915818227, \"F1\": 0.7781922525107604, \"Memory in Mb\": 2.555143356323242, \"Time in s\": 178.12388700000002 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8103072828453908, \"F1\": 0.7810055865921788, \"Memory in Mb\": 2.665616989135742, \"Time in s\": 187.169884 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8131634819532909, \"F1\": 0.7845484221980414, \"Memory in Mb\": 2.698610305786133, \"Time in s\": 196.43184 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8158803222094362, \"F1\": 0.7877984084880637, \"Memory in Mb\": 2.7711353302001958, \"Time in s\": 205.91274200000004 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8173444169849472, \"F1\": 0.78932365897901, \"Memory in Mb\": 2.792703628540039, \"Time in s\": 215.61058800000004 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8183015141540487, \"F1\": 0.7909090909090909, \"Memory in Mb\": 2.8151988983154297, \"Time in s\": 225.53181000000004 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8205018228608192, \"F1\": 0.7940959409594096, \"Memory in Mb\": 2.871114730834961, \"Time in s\": 235.67375100000004 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8209268190396309, \"F1\": 0.7941176470588236, \"Memory in Mb\": 2.9113216400146484, \"Time in s\": 
246.03360500000005 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.822974358974359, \"F1\": 0.7958362905133666, \"Memory in Mb\": 3.014688491821289, \"Time in s\": 256.6143220000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.825135514956836, \"F1\": 0.7989845372720977, \"Memory in Mb\": 3.062814712524414, \"Time in s\": 267.41563500000007 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.825437389424022, \"F1\": 0.7994579945799458, \"Memory in Mb\": 3.1556224822998047, \"Time in s\": 278.43394300000006 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8266897746967071, \"F1\": 0.800796812749004, \"Memory in Mb\": 3.260141372680664, \"Time in s\": 289.67931000000004 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8282694848084544, \"F1\": 0.8026030368763557, \"Memory in Mb\": 3.31306266784668, \"Time in s\": 301.15239700000006 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8883977900552487, \"F1\": 0.8861330326944759, \"Memory in Mb\": 2.6054086685180664, \"Time in s\": 3.368733 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9127553837658752, \"F1\": 0.8939597315436243, \"Memory in Mb\": 3.465878486633301, \"Time in s\": 9.891931 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9013617960986382, \"F1\": 0.8816254416961131, \"Memory in Mb\": 3.735013008117676, \"Time in s\": 19.447972 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9100193210046924, \"F1\": 0.8914780292942742, \"Memory in Mb\": 3.9933290481567374, \"Time in s\": 31.888989 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9125634797968648, \"F1\": 0.890728476821192, \"Memory in Mb\": 3.547215461730957, \"Time in s\": 47.051529 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.910027598896044, \"F1\": 0.8870408870408871, \"Memory in Mb\": 4.102688789367676, \"Time in s\": 65.23951100000001 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9101088156442202, \"F1\": 0.8873517786561265, \"Memory in Mb\": 4.165738105773926, \"Time in s\": 86.445256 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079619152752864, \"F1\": 0.8839798225778397, \"Memory in Mb\": 3.951657295227051, \"Time in s\": 110.708644 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9089905556236968, \"F1\": 0.8907216494845361, \"Memory in Mb\": 4.59532642364502, \"Time in s\": 137.95385399999998 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Leveraging 
Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9097030577326416, \"F1\": 0.8939040207522698, \"Memory in Mb\": 4.363041877746582, \"Time in s\": 168.15601999999998 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079779227295536, \"F1\": 0.893631829254147, \"Memory in Mb\": 4.951889991760254, \"Time in s\": 201.32076 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.90920798454604, \"F1\": 0.896399706098457, \"Memory in Mb\": 4.796502113342285, \"Time in s\": 237.348168 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9081260083213042, \"F1\": 0.8950533462657614, \"Memory in Mb\": 5.402684211730957, \"Time in s\": 276.331972 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9066466924229284, \"F1\": 0.8936972526485905, \"Memory in Mb\": 4.1159868240356445, \"Time in s\": 318.342965 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079402457870336, \"F1\": 0.896363184491757, \"Memory in Mb\": 5.441300392150879, \"Time in s\": 363.458518 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079682649189376, \"F1\": 0.8968131188118812, \"Memory in Mb\": 6.5885820388793945, \"Time in s\": 411.598932 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.908252710862931, \"F1\": 0.8965062623599208, \"Memory in Mb\": 5.050324440002441, \"Time in s\": 462.698224 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9068498190960936, \"F1\": 0.8945797765285585, \"Memory in Mb\": 6.200932502746582, \"Time in s\": 516.871701 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064079474815546, \"F1\": 0.8923775803326874, \"Memory in Mb\": 6.66331958770752, \"Time in s\": 574.184961 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9061758375186268, \"F1\": 0.8919399949148233, \"Memory in Mb\": 6.39217472076416, \"Time in s\": 634.677368 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064388961892248, \"F1\": 0.8910515362957523, \"Memory in Mb\": 7.244908332824707, \"Time in s\": 698.371295 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9069790778184738, \"F1\": 0.8925092764378478, \"Memory in Mb\": 8.84801197052002, \"Time in s\": 765.4898820000001 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9057445889523444, \"F1\": 0.8911670176216338, \"Memory in Mb\": 6.796334266662598, \"Time in s\": 836.163928 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9054408315319872, \"F1\": 0.8892837910608508, \"Memory in Mb\": 
8.134293556213379, \"Time in s\": 910.125477 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049847675394056, \"F1\": 0.8878816296759404, \"Memory in Mb\": 6.271161079406738, \"Time in s\": 987.478572 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.903077902780726, \"F1\": 0.8852244733799206, \"Memory in Mb\": 4.8961381912231445, \"Time in s\": 1068.279683 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9014349372470464, \"F1\": 0.8823845065612956, \"Memory in Mb\": 4.712262153625488, \"Time in s\": 1152.538044 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8988055347498719, \"F1\": 0.8794439487155403, \"Memory in Mb\": 4.656708717346191, \"Time in s\": 1240.2539379999998 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8987934381304, \"F1\": 0.8792625891113836, \"Memory in Mb\": 4.200020790100098, \"Time in s\": 1331.2764919999995 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8987453548695684, \"F1\": 0.8798777826276736, \"Memory in Mb\": 3.921463966369629, \"Time in s\": 1425.5964879999997 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8965283959408937, \"F1\": 0.8766762858597862, \"Memory in Mb\": 3.647244453430176, \"Time in s\": 1523.2409069999997 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8964851160705046, \"F1\": 0.8761606074361409, \"Memory in Mb\": 3.9709863662719727, \"Time in s\": 1624.2218739999996 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8961434257617821, \"F1\": 0.8756059452746283, \"Memory in Mb\": 3.793328285217285, \"Time in s\": 1728.6222849999997 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8959516930169139, \"F1\": 0.8747704450435666, \"Memory in Mb\": 4.122292518615723, \"Time in s\": 1836.357538 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8945725188432306, \"F1\": 0.8729911477527449, \"Memory in Mb\": 3.9709787368774414, \"Time in s\": 1947.541881 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8943124329296336, \"F1\": 0.8729872139725119, \"Memory in Mb\": 4.574315071105957, \"Time in s\": 2061.9964669999995 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8941559022702187, \"F1\": 0.8730862784375448, \"Memory in Mb\": 5.331856727600098, \"Time in s\": 2179.80501 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8936299997095303, \"F1\": 0.8722973915469383, \"Memory in Mb\": 5.328598976135254, \"Time in s\": 2300.9518229999994 }, { \"step\": 35334, 
\"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.893612203888716, \"F1\": 0.8717196191516227, \"Memory in Mb\": 5.664120674133301, \"Time in s\": 2425.5167449999994 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8932641629184028, \"F1\": 0.8702709954386908, \"Memory in Mb\": 4.706181526184082, \"Time in s\": 2553.3715769999994 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.893067707632252, \"F1\": 0.8697875688434304, \"Memory in Mb\": 5.890534400939941, \"Time in s\": 2684.525425 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8926178024230638, \"F1\": 0.8686004630820685, \"Memory in Mb\": 5.698811531066895, \"Time in s\": 2819.0438189999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8924198475241933, \"F1\": 0.8686165710523841, \"Memory in Mb\": 5.71596622467041, \"Time in s\": 2956.962011 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8929583824599252, \"F1\": 0.8702763505913111, \"Memory in Mb\": 6.792008399963379, \"Time in s\": 3098.2064909999995 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8931541121930879, \"F1\": 0.8715801886792452, \"Memory in Mb\": 8.543190956115723, \"Time in s\": 3242.702748 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8934373125374925, \"F1\": 0.8727689442773243, \"Memory in Mb\": 7.9951677322387695, \"Time in s\": 3390.4303709999995 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8937554308259552, \"F1\": 0.8733411725180581, \"Memory in Mb\": 7.843605995178223, \"Time in s\": 3541.260541 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8933474371651298, \"F1\": 0.8727572016460905, \"Memory in Mb\": 8.17009449005127, \"Time in s\": 3695.381786 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8920550537246863, \"F1\": 0.8708216519301273, \"Memory in Mb\": 5.159916877746582, \"Time in s\": 3852.778305 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8923817302810216, \"F1\": 0.8714568226763348, \"Memory in Mb\": 4.89464282989502, \"Time in s\": 4013.28156 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.75, \"F1\": 0.75, \"Memory in Mb\": 0.6839132308959961, \"Time in s\": 0.224986 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8, \"Memory in Mb\": 0.6847753524780273, \"Time in s\": 0.690283 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8378378378378378, \"F1\": 
0.8333333333333334, \"Memory in Mb\": 0.6847753524780273, \"Time in s\": 1.396176 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8484848484848485, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.6699657440185547, \"Time in s\": 2.3378490000000003 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467741935483871, \"F1\": 0.8403361344537815, \"Memory in Mb\": 0.9459667205810548, \"Time in s\": 3.5085690000000005 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8456375838926175, \"F1\": 0.8456375838926175, \"Memory in Mb\": 0.9459896087646484, \"Time in s\": 4.905419 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867816091954023, \"F1\": 0.8588957055214724, \"Memory in Mb\": 1.1184329986572266, \"Time in s\": 6.555308 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8693467336683417, \"F1\": 0.8617021276595744, \"Memory in Mb\": 1.313650131225586, \"Time in s\": 8.437938 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8660714285714286, \"F1\": 0.8557692307692308, \"Memory in Mb\": 1.3411617279052734, \"Time in s\": 10.542565 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8554216867469879, \"F1\": 0.8434782608695653, \"Memory in Mb\": 1.341230392456055, \"Time in s\": 12.865947 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8576642335766423, \"F1\": 0.844621513944223, \"Memory in Mb\": 1.278768539428711, \"Time in s\": 15.438039 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.862876254180602, \"F1\": 0.8464419475655431, \"Memory in Mb\": 1.497152328491211, \"Time in s\": 18.241575 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8703703703703703, \"F1\": 0.851063829787234, \"Memory in Mb\": 1.5338878631591797, \"Time in s\": 21.265574 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8710601719197708, \"F1\": 0.8494983277591974, \"Memory in Mb\": 1.6005840301513672, \"Time in s\": 24.507018 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8716577540106952, \"F1\": 0.8481012658227849, \"Memory in Mb\": 1.880643844604492, \"Time in s\": 27.994389 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8696741854636592, \"F1\": 0.8433734939759037, \"Memory in Mb\": 2.144338607788086, \"Time in s\": 31.712731 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8702830188679245, \"F1\": 0.8405797101449276, \"Memory in Mb\": 2.182210922241211, \"Time in s\": 35.657971 }, { \"step\": 450, \"track\": 
\"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.845303867403315, \"Memory in Mb\": 2.182027816772461, \"Time in s\": 39.813046 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8505154639175259, \"Memory in Mb\": 2.2095394134521484, \"Time in s\": 44.205113 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875751503006012, \"F1\": 0.8502415458937198, \"Memory in Mb\": 2.2298946380615234, \"Time in s\": 48.820324 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8778625954198473, \"F1\": 0.8497652582159624, \"Memory in Mb\": 2.294797897338867, \"Time in s\": 53.667583 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8743169398907104, \"F1\": 0.8463251670378619, \"Memory in Mb\": 2.4045467376708984, \"Time in s\": 58.748532 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8763066202090593, \"F1\": 0.8479657387580299, \"Memory in Mb\": 2.459569931030273, \"Time in s\": 64.055891 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8764607679465777, \"F1\": 0.8451882845188285, \"Memory in Mb\": 2.459569931030273, \"Time in s\": 69.582297 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8782051282051282, \"F1\": 0.8442622950819672, \"Memory in Mb\": 2.459615707397461, \"Time in s\": 75.26312 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8813559322033898, \"F1\": 0.850485436893204, \"Memory in Mb\": 2.390268325805664, \"Time in s\": 81.075862 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798219584569733, \"F1\": 0.8513761467889909, \"Memory in Mb\": 2.665342330932617, \"Time in s\": 87.004325 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8841201716738197, \"F1\": 0.8550983899821109, \"Memory in Mb\": 2.685148239135742, \"Time in s\": 93.043775 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8556876061120544, \"Memory in Mb\": 2.988882064819336, \"Time in s\": 99.203547 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8838451268357811, \"F1\": 0.8576104746317513, \"Memory in Mb\": 3.061452865600586, \"Time in s\": 105.474091 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850129198966409, \"F1\": 0.8585055643879173, \"Memory in Mb\": 3.171110153198242, \"Time in s\": 111.86121 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8848560700876095, \"F1\": 
0.8601823708206686, \"Memory in Mb\": 3.2535533905029297, \"Time in s\": 118.359722 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8822815533980582, \"F1\": 0.8583941605839417, \"Memory in Mb\": 3.308439254760742, \"Time in s\": 124.969526 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845700824499411, \"F1\": 0.8607954545454546, \"Memory in Mb\": 3.3186397552490234, \"Time in s\": 131.697623 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88558352402746, \"F1\": 0.8611111111111112, \"Memory in Mb\": 3.2835521697998047, \"Time in s\": 138.541741 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8843159065628476, \"F1\": 0.859078590785908, \"Memory in Mb\": 3.3748836517333984, \"Time in s\": 145.497394 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8852813852813853, \"F1\": 0.8612565445026178, \"Memory in Mb\": 3.4775447845458984, \"Time in s\": 152.56592600000002 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861959957850368, \"F1\": 0.864321608040201, \"Memory in Mb\": 3.507909774780273, \"Time in s\": 159.751347 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8880903490759754, \"F1\": 0.8665850673194614, \"Memory in Mb\": 3.56281852722168, \"Time in s\": 167.044698 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8888888888888888, \"F1\": 0.867699642431466, \"Memory in Mb\": 3.645017623901367, \"Time in s\": 174.44997500000002 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888671875, \"F1\": 0.8680555555555557, \"Memory in Mb\": 3.708791732788086, \"Time in s\": 181.96372800000003 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8903717826501429, \"F1\": 0.8706411698537682, \"Memory in Mb\": 3.783597946166992, \"Time in s\": 189.590748 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8910614525139665, \"F1\": 0.8724100327153763, \"Memory in Mb\": 3.893186569213867, \"Time in s\": 197.32835100000003 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926296633303002, \"F1\": 0.8744680851063831, \"Memory in Mb\": 3.893209457397461, \"Time in s\": 205.17488800000004 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.891459074733096, \"F1\": 0.8742268041237113, \"Memory in Mb\": 3.893209457397461, \"Time in s\": 213.13117300000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929503916449086, \"F1\": 0.8758829465186679, \"Memory in Mb\": 3.893255233764648, \"Time in s\": 
221.19416900000004 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8918228279386712, \"F1\": 0.874381800197824, \"Memory in Mb\": 3.8934383392333984, \"Time in s\": 229.37121900000005 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932443703085905, \"F1\": 0.8757281553398059, \"Memory in Mb\": 3.867467880249024, \"Time in s\": 237.66201400000008 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8946078431372549, \"F1\": 0.8772597526165558, \"Memory in Mb\": 3.903593063354492, \"Time in s\": 246.05749800000007 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8951160928742994, \"F1\": 0.8783658310120707, \"Memory in Mb\": 3.932668685913086, \"Time in s\": 254.56059300000007 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1741485595703125, \"Time in s\": 4.196349 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1747589111328125, \"Time in s\": 11.133978 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 20.515058 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 32.334394 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 46.585565 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1759796142578125, \"Time in s\": 63.272850000000005 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1759796142578125, \"Time in s\": 82.384207 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.4890699386596679, \"Time in s\": 104.078316 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496554945696, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4890470504760742, \"Time in s\": 128.646042 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999684691786221, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4890470504760742, \"Time in s\": 156.063076 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133575386968, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 186.340267 }, { \"step\": 22836, \"track\": \"Binary classification\", 
\"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737245456536, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 219.481443 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574581615328, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 255.483814 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997747832288578, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 294.351753 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999789798206278, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 336.077392 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029362498768, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 380.664356 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998145285935084, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 428.135176 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248328613552, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 478.442458 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998340524394292, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4984426498413086, \"Time in s\": 531.6199799999999 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423500354712, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5112142562866211, \"Time in s\": 587.6570539999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498573644964, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5112142562866211, \"Time in s\": 646.546353 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566821927624, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 708.303189 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998629135441418, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 772.911335 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686256048696, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 840.389935 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998528608063229, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.6032171249389648, \"Time in s\": 910.729159 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99985852012046, \"F1\": 0.6956521739130435, \"Memory 
in Mb\": 0.6032171249389648, \"Time in s\": 983.924468 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248345659788, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6488561630249023, \"Time in s\": 1059.968701 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999831090591746, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6488561630249023, \"Time in s\": 1138.856016 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999836915159642, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6487874984741211, \"Time in s\": 1220.600051 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997197358510396, \"F1\": 0.5789473684210525, \"Memory in Mb\": 0.915858268737793, \"Time in s\": 1305.292865 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118253322484, \"F1\": 0.5641025641025641, \"Memory in Mb\": 0.9158124923706056, \"Time in s\": 1392.9564999999998 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208309385008, \"F1\": 0.5641025641025641, \"Memory in Mb\": 0.9158353805541992, \"Time in s\": 1483.5784209999997 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996974425937132, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681943893432615, \"Time in s\": 1577.1896599999998 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063414784934, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681486129760742, \"Time in s\": 1673.7731769999998 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999714731847937, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681257247924805, \"Time in s\": 1773.3049339999998 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997226560789408, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0767507553100586, \"Time in s\": 1875.807102 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997301519670502, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0766592025756836, \"Time in s\": 1981.276839 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372533292768, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0766363143920898, \"Time in s\": 2089.690205 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997439905141748, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2201.064573 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997503908354024, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1835393905639648, \"Time in s\": 2315.37923 }, { \"step\": 78023, \"track\": 
\"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756478941837, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2432.635431 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762277134814, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1834936141967771, \"Time in s\": 2552.8498939999995 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997678056411008, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2676.010932 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997730828486464, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.184126853942871, \"Time in s\": 2802.105151 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997781255108952, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1841497421264648, \"Time in s\": 2931.145124 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997829489244549, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1841497421264648, \"Time in s\": 3063.115766 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999765205724508, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.323287010192871, \"Time in s\": 3198.03692 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997700973254656, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.3272314071655271, \"Time in s\": 3335.914266 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997747892671, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.3272314071655271, \"Time in s\": 3476.72485 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997792935290964, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.327254295349121, \"Time in s\": 3620.355738 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6, \"F1\": 0.5434782608695652, \"Memory in Mb\": 0.7756900787353516, \"Time in s\": 0.335809 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7251184834123223, \"F1\": 0.6881720430107526, \"Memory in Mb\": 1.166391372680664, \"Time in s\": 1.061098 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7539432176656151, \"F1\": 0.7253521126760563, \"Memory in Mb\": 1.6065692901611328, \"Time in s\": 2.189762 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7825059101654847, \"F1\": 0.7566137566137565, \"Memory in Mb\": 2.022294044494629, \"Time in s\": 3.739465 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7958412098298677, \"F1\": 0.7631578947368421, \"Memory in Mb\": 2.4158077239990234, \"Time in s\": 
5.557825 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7937007874015748, \"F1\": 0.7622504537205083, \"Memory in Mb\": 2.785458564758301, \"Time in s\": 7.636877 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8029689608636977, \"F1\": 0.7675159235668789, \"Memory in Mb\": 3.1855859756469727, \"Time in s\": 9.989946 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8110979929161747, \"F1\": 0.7790055248618785, \"Memory in Mb\": 3.637519836425781, \"Time in s\": 12.613619 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8163693599160545, \"F1\": 0.7836835599505564, \"Memory in Mb\": 3.940545082092285, \"Time in s\": 15.512474 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8243626062322946, \"F1\": 0.7905405405405406, \"Memory in Mb\": 4.2790374755859375, \"Time in s\": 18.684266 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8240343347639485, \"F1\": 0.790602655771195, \"Memory in Mb\": 4.743890762329102, \"Time in s\": 22.147393 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8253343823760818, \"F1\": 0.7936802973977696, \"Memory in Mb\": 5.073901176452637, \"Time in s\": 25.9097 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8271604938271605, \"F1\": 0.7941176470588235, \"Memory in Mb\": 5.443793296813965, \"Time in s\": 29.969434 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8293998651382333, \"F1\": 0.7967871485943775, \"Memory in Mb\": 5.808208465576172, \"Time in s\": 34.339834 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8313404657016992, \"F1\": 0.7994011976047903, \"Memory in Mb\": 6.233636856079102, \"Time in s\": 39.030108 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8348082595870207, \"F1\": 0.8030942334739802, \"Memory in Mb\": 6.672585487365723, \"Time in s\": 44.033544 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8384230982787341, \"F1\": 0.8094302554027505, \"Memory in Mb\": 7.1109819412231445, \"Time in s\": 49.366899 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8363922391190352, \"F1\": 0.8095238095238096, \"Memory in Mb\": 7.617318153381348, \"Time in s\": 55.036325 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8335817188276204, \"F1\": 0.8080229226361031, \"Memory in Mb\": 8.179092407226562, \"Time in s\": 61.04992 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8357715903728173, \"F1\": 0.8127018299246501, \"Memory in Mb\": 8.54446792602539, \"Time in s\": 67.397947 }, { \"step\": 2226, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8368539325842697, \"F1\": 0.8143222506393862, \"Memory in Mb\": 9.00230598449707, \"Time in s\": 74.092607 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8365508365508365, \"F1\": 0.8142369575816674, \"Memory in Mb\": 9.503581047058104, \"Time in s\": 81.1143 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8379154698399671, \"F1\": 0.8160223567768979, \"Memory in Mb\": 9.875147819519045, \"Time in s\": 88.456785 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8375933936295714, \"F1\": 0.816525988449578, \"Memory in Mb\": 10.285362243652344, \"Time in s\": 96.129153 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8369195922989807, \"F1\": 0.8161702127659575, \"Memory in Mb\": 10.684521675109863, \"Time in s\": 104.109815 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8381125226860254, \"F1\": 0.8176614881439086, \"Memory in Mb\": 11.088122367858888, \"Time in s\": 112.408624 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8392170569730864, \"F1\": 0.8184688239936858, \"Memory in Mb\": 11.42569065093994, \"Time in s\": 121.031545 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.840242669362993, \"F1\": 0.8188073394495413, \"Memory in Mb\": 11.86772918701172, \"Time in s\": 129.980691 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8395704523267166, \"F1\": 0.8186833394630378, \"Memory in Mb\": 12.388784408569336, \"Time in s\": 139.271806 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8417741428122051, \"F1\": 0.8205494113449874, \"Memory in Mb\": 12.724870681762695, \"Time in s\": 148.909629 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8423135464231355, \"F1\": 0.8207612456747405, \"Memory in Mb\": 13.150970458984377, \"Time in s\": 158.896732 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8428192273665586, \"F1\": 0.8221554888221555, \"Memory in Mb\": 13.55066967010498, \"Time in s\": 169.223096 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8441521303974836, \"F1\": 0.8228794280142996, \"Memory in Mb\": 13.954619407653809, \"Time in s\": 179.903449 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8454066056064391, \"F1\": 0.8234548335974643, \"Memory in Mb\": 14.297491073608398, \"Time in s\": 190.93427 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.845510919385279, \"F1\": 0.8241791960724149, \"Memory in Mb\": 14.779010772705078, \"Time in s\": 202.327208 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": 
\"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8453473132372215, \"F1\": 0.8241954707985697, \"Memory in Mb\": 15.263079643249512, \"Time in s\": 214.078215 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8454475899005356, \"F1\": 0.8239395700174318, \"Memory in Mb\": 15.65628433227539, \"Time in s\": 226.208072 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8435559970201142, \"F1\": 0.8216308040770102, \"Memory in Mb\": 16.25907039642334, \"Time in s\": 238.717005 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8439390273409146, \"F1\": 0.8220689655172413, \"Memory in Mb\": 16.753341674804688, \"Time in s\": 251.598926 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8461901391837697, \"F1\": 0.8249194414607949, \"Memory in Mb\": 17.15353012084961, \"Time in s\": 264.978021 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8474108170310702, \"F1\": 0.8263033796175006, \"Memory in Mb\": 17.548202514648438, \"Time in s\": 278.744972 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8467760053920468, \"F1\": 0.8253968253968255, \"Memory in Mb\": 17.966373443603516, \"Time in s\": 292.921574 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.847267939433838, \"F1\": 0.8265204386839482, \"Memory in Mb\": 18.466463088989254, \"Time in s\": 307.49799799999994 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8475230538280077, \"F1\": 0.8273014330823415, \"Memory in Mb\": 18.88692092895508, \"Time in s\": 322.48837999999995 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8483958901237156, \"F1\": 0.828143570240076, \"Memory in Mb\": 19.283724784851078, \"Time in s\": 337.9195859999999 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488205128205129, \"F1\": 0.8279243520896566, \"Memory in Mb\": 19.64915180206299, \"Time in s\": 353.77839399999993 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8496285886368199, \"F1\": 0.8291904218928163, \"Memory in Mb\": 19.982958793640137, \"Time in s\": 370.058467 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.850206408492235, \"F1\": 0.829682610639249, \"Memory in Mb\": 20.404964447021484, \"Time in s\": 386.77508599999993 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8501829385711535, \"F1\": 0.8296847635726795, \"Memory in Mb\": 20.84154510498047, \"Time in s\": 403.931326 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8503491224759389, \"F1\": 0.8299378082779326, \"Memory in Mb\": 21.28391456604004, \"Time in s\": 421.539661 }, { \"step\": 906, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082872928176796, \"F1\": 0.905788876276958, \"Memory in Mb\": 2.982789993286133, \"Time in s\": 3.516385 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9276642738818331, \"F1\": 0.910089224433768, \"Memory in Mb\": 5.035035133361816, \"Time in s\": 9.67052 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9193963930806036, \"F1\": 0.9004092769440655, \"Memory in Mb\": 7.41602611541748, \"Time in s\": 18.410774 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9213359094672924, \"F1\": 0.9030941856511392, \"Memory in Mb\": 8.590222358703613, \"Time in s\": 29.484026 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.919629057187017, \"F1\": 0.896825396825397, \"Memory in Mb\": 10.053829193115234, \"Time in s\": 42.932107 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9164673413063478, \"F1\": 0.8929245283018867, \"Memory in Mb\": 11.804065704345703, \"Time in s\": 58.899071000000006 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9165746727645482, \"F1\": 0.8945585010962727, \"Memory in Mb\": 14.095972061157228, \"Time in s\": 77.41200400000001 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136194287291292, \"F1\": 0.8907885554780182, \"Memory in Mb\": 16.2540225982666, \"Time in s\": 98.532638 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9135287624187416, \"F1\": 0.8957254843957995, \"Memory in Mb\": 18.652454376220703, \"Time in s\": 122.39659 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9157743680317916, \"F1\": 0.9006122183144457, \"Memory in Mb\": 20.98922061920166, \"Time in s\": 148.829731 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9175112895132964, \"F1\": 0.9043741275011632, \"Memory in Mb\": 21.67603302001953, \"Time in s\": 177.92592100000002 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9189586974519364, \"F1\": 0.9074093536521284, \"Memory in Mb\": 23.620224952697757, \"Time in s\": 209.60222600000003 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9187399167869577, \"F1\": 0.907222491517208, \"Memory in Mb\": 26.53412914276123, \"Time in s\": 243.98565400000004 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9176062445793582, \"F1\": 0.906084299451784, \"Memory in Mb\": 30.32783317565918, \"Time in s\": 281.19943900000004 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9178747516373538, \"F1\": 0.9076464746772592, \"Memory in Mb\": 30.98683452606201, \"Time in s\": 321.28235000000006 }, { \"step\": 14496, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.917764746464298, \"F1\": 0.9080388828884433, \"Memory in Mb\": 33.47672271728516, \"Time in s\": 364.2111060000001 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9181221998571522, \"F1\": 0.9079091506609216, \"Memory in Mb\": 36.0946741104126, \"Time in s\": 409.94528800000006 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9156803826577544, \"F1\": 0.90483770503149, \"Memory in Mb\": 39.82530307769776, \"Time in s\": 458.76832800000005 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136118050310812, \"F1\": 0.9010184383944618, \"Memory in Mb\": 31.60810947418213, \"Time in s\": 510.754243 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136265798333242, \"F1\": 0.9008803597441256, \"Memory in Mb\": 29.540003776550293, \"Time in s\": 565.7272700000001 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9144809461235216, \"F1\": 0.9008350094471872, \"Memory in Mb\": 30.251863479614254, \"Time in s\": 623.5764490000001 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9137022728413025, \"F1\": 0.9008188213585516, \"Memory in Mb\": 34.67849349975586, \"Time in s\": 684.5888420000001 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9112156260498152, \"F1\": 0.8982286280118825, \"Memory in Mb\": 25.08942031860352, \"Time in s\": 748.7394940000001 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9108678655199376, \"F1\": 0.8963414634146342, \"Memory in Mb\": 28.645745277404785, \"Time in s\": 815.8674180000002 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.910062254404168, \"F1\": 0.8947232415111892, \"Memory in Mb\": 32.00656318664551, \"Time in s\": 886.1270340000001 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082148163871788, \"F1\": 0.8923306772908367, \"Memory in Mb\": 29.995322227478027, \"Time in s\": 959.605197 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9068312824496136, \"F1\": 0.8901633813677768, \"Memory in Mb\": 31.142220497131348, \"Time in s\": 1036.2892310000002 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.904876414238972, \"F1\": 0.8880745860197596, \"Memory in Mb\": 17.65717887878418, \"Time in s\": 1115.9640360000003 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9052639591976552, \"F1\": 0.8883004981375936, \"Memory in Mb\": 19.38848114013672, \"Time in s\": 1198.3281000000004 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.905331321976526, \"F1\": 0.8887639963685098, \"Memory in Mb\": 20.18451499938965, \"Time in s\": 
1283.4271300000005 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9040769093822324, \"F1\": 0.886768661735037, \"Memory in Mb\": 24.22576713562012, \"Time in s\": 1371.4128720000006 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9030388741333516, \"F1\": 0.8849883392659875, \"Memory in Mb\": 26.1504430770874, \"Time in s\": 1462.6499510000006 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9022644412482858, \"F1\": 0.8837708830548926, \"Memory in Mb\": 31.19912052154541, \"Time in s\": 1557.0334400000006 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9016004934584294, \"F1\": 0.8822684016313848, \"Memory in Mb\": 35.062607765197754, \"Time in s\": 1654.6421970000006 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9002491406225361, \"F1\": 0.8805739097602416, \"Memory in Mb\": 34.509202003479004, \"Time in s\": 1755.5826140000006 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8993407941131382, \"F1\": 0.8797127468581688, \"Memory in Mb\": 39.23386001586914, \"Time in s\": 1859.7925240000009 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8986307091077235, \"F1\": 0.879040296169728, \"Memory in Mb\": 43.53414344787598, \"Time in s\": 1967.3677710000009 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8976384814244633, \"F1\": 0.8779440288168467, \"Memory in Mb\": 46.41888236999512, \"Time in s\": 2078.506307000001 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8964990235756941, \"F1\": 0.8759119134063996, \"Memory in Mb\": 47.29628944396973, \"Time in s\": 2193.0138890000007 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8963271613455117, \"F1\": 0.874812568724801, \"Memory in Mb\": 50.92376518249512, \"Time in s\": 2310.752343000001 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8956252523892853, \"F1\": 0.8736433855881107, \"Memory in Mb\": 52.94390201568604, \"Time in s\": 2431.8466670000007 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8955612204672676, \"F1\": 0.8730756946662408, \"Memory in Mb\": 54.53785705566406, \"Time in s\": 2556.3609540000007 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.895320481556588, \"F1\": 0.8731176104542625, \"Memory in Mb\": 58.273138999938965, \"Time in s\": 2684.2112230000007 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8957178335800116, \"F1\": 0.8745056603773586, \"Memory in Mb\": 61.72060203552246, \"Time in s\": 2815.6197070000007 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8957541269101523, \"F1\": 
0.8755198875285573, \"Memory in Mb\": 62.47746276855469, \"Time in s\": 2950.394142000001 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962447510497901, \"F1\": 0.8769003017707682, \"Memory in Mb\": 58.14933300018311, \"Time in s\": 3088.5413130000006 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967614663817195, \"F1\": 0.8777598576274956, \"Memory in Mb\": 60.56365966796875, \"Time in s\": 3230.0396030000006 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.896612780831053, \"F1\": 0.8775932480261367, \"Memory in Mb\": 59.69151401519776, \"Time in s\": 3375.0306610000007 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962223774018426, \"F1\": 0.8767423816785724, \"Memory in Mb\": 64.22966861724854, \"Time in s\": 3523.5486860000005 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967968387823131, \"F1\": 0.8776210046857412, \"Memory in Mb\": 42.88050365447998, \"Time in s\": 3675.4418880000007 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7142857142857143, \"Memory in Mb\": 0.5762147903442383, \"Time in s\": 0.119361 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7551020408163265, \"F1\": 0.7391304347826088, \"Memory in Mb\": 0.6475057601928711, \"Time in s\": 0.366938 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7777777777777778, \"Memory in Mb\": 0.9396762847900392, \"Time in s\": 0.745521 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.7999999999999999, \"Memory in Mb\": 1.1059551239013672, \"Time in s\": 1.266772 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8067226890756303, \"Memory in Mb\": 1.2883186340332031, \"Time in s\": 1.944005 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8187919463087249, \"Memory in Mb\": 1.3393936157226562, \"Time in s\": 2.776317 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8292682926829268, \"Memory in Mb\": 1.354720115661621, \"Time in s\": 3.764387 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8391959798994975, \"F1\": 0.8297872340425532, \"Memory in Mb\": 1.492502212524414, \"Time in s\": 4.90761 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.84375, \"F1\": 0.8309178743961353, \"Memory in Mb\": 1.6093759536743164, \"Time in s\": 6.207708 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8353413654618473, \"F1\": 
0.8225108225108225, \"Memory in Mb\": 1.7083539962768557, \"Time in s\": 7.674896 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394160583941606, \"F1\": 0.8253968253968254, \"Memory in Mb\": 1.7150201797485352, \"Time in s\": 9.301078 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.842809364548495, \"F1\": 0.825278810408922, \"Memory in Mb\": 1.782989501953125, \"Time in s\": 11.084076 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8518518518518519, \"F1\": 0.8309859154929577, \"Memory in Mb\": 1.8577651977539065, \"Time in s\": 13.027607 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567335243553008, \"F1\": 0.8344370860927152, \"Memory in Mb\": 1.8672637939453125, \"Time in s\": 15.126796 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8286604361370716, \"Memory in Mb\": 2.0530452728271484, \"Time in s\": 17.394419 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8284023668639053, \"Memory in Mb\": 2.076310157775879, \"Time in s\": 19.823605 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8584905660377359, \"F1\": 0.8295454545454545, \"Memory in Mb\": 2.0878963470458984, \"Time in s\": 22.419713 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8292682926829269, \"Memory in Mb\": 2.0654611587524414, \"Time in s\": 25.175931 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8649789029535865, \"F1\": 0.8383838383838383, \"Memory in Mb\": 2.202821731567383, \"Time in s\": 28.096353 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8443396226415094, \"Memory in Mb\": 2.377251625061035, \"Time in s\": 31.184831 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8702290076335878, \"F1\": 0.8447488584474886, \"Memory in Mb\": 2.37432861328125, \"Time in s\": 34.435066 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8706739526411658, \"F1\": 0.8466522678185745, \"Memory in Mb\": 2.44970703125, \"Time in s\": 37.85251 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.872822299651568, \"F1\": 0.8488612836438924, \"Memory in Mb\": 2.5103416442871094, \"Time in s\": 41.431292 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8764607679465777, \"F1\": 0.8508064516129032, \"Memory in Mb\": 2.478057861328125, \"Time in s\": 45.177182 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875, \"F1\": 0.8464566929133858, \"Memory in Mb\": 2.529691696166992, 
\"Time in s\": 49.08745 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8782742681047766, \"F1\": 0.8528864059590316, \"Memory in Mb\": 2.6017770767211914, \"Time in s\": 53.16255700000001 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8783382789317508, \"F1\": 0.856140350877193, \"Memory in Mb\": 2.631270408630371, \"Time in s\": 57.40937600000001 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882689556509299, \"F1\": 0.8595890410958904, \"Memory in Mb\": 2.6406030654907227, \"Time in s\": 61.81678500000001 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8617886178861789, \"Memory in Mb\": 2.701584815979004, \"Time in s\": 66.39355 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8851802403204272, \"F1\": 0.8652037617554857, \"Memory in Mb\": 2.789071083068848, \"Time in s\": 71.086949 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8863049095607235, \"F1\": 0.8658536585365854, \"Memory in Mb\": 2.950723648071289, \"Time in s\": 75.87878400000001 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.886107634543179, \"F1\": 0.8671532846715327, \"Memory in Mb\": 2.9481277465820312, \"Time in s\": 80.77073000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.869198312236287, \"Memory in Mb\": 3.217336654663086, \"Time in s\": 85.76636800000001 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8696844993141291, \"Memory in Mb\": 3.2494144439697266, \"Time in s\": 90.85872600000002 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8713136729222519, \"Memory in Mb\": 3.264657974243164, \"Time in s\": 96.047686 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8694516971279374, \"Memory in Mb\": 3.388858795166016, \"Time in s\": 101.334025 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906926406926406, \"F1\": 0.8729559748427673, \"Memory in Mb\": 3.3625974655151367, \"Time in s\": 106.715104 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8914646996838778, \"F1\": 0.875453446191052, \"Memory in Mb\": 3.5129919052124023, \"Time in s\": 112.201489 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893223819301848, \"F1\": 0.8773584905660378, \"Memory in Mb\": 3.552186965942383, \"Time in s\": 117.78607 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8928928928928929, \"F1\": 0.8771526980482205, \"Memory in Mb\": 
3.671197891235352, \"Time in s\": 123.468781 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.892578125, \"F1\": 0.8772321428571428, \"Memory in Mb\": 3.734159469604492, \"Time in s\": 129.25173700000002 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.894184938036225, \"F1\": 0.8794788273615636, \"Memory in Mb\": 3.775693893432617, \"Time in s\": 135.127992 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929236499068901, \"F1\": 0.879074658254469, \"Memory in Mb\": 3.8186750411987305, \"Time in s\": 141.101233 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8944494995450409, \"F1\": 0.8809034907597535, \"Memory in Mb\": 3.859647750854492, \"Time in s\": 147.172282 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8959074733096085, \"F1\": 0.8835820895522387, \"Memory in Mb\": 3.8923940658569336, \"Time in s\": 153.346878 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896431679721497, \"F1\": 0.8839024390243903, \"Memory in Mb\": 4.0131940841674805, \"Time in s\": 159.622141 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8952299829642248, \"F1\": 0.8822966507177035, \"Memory in Mb\": 4.097073554992676, \"Time in s\": 165.998758 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896580483736447, \"F1\": 0.8834586466165414, \"Memory in Mb\": 4.154815673828125, \"Time in s\": 172.479929 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8978758169934641, \"F1\": 0.8847926267281105, \"Memory in Mb\": 4.238761901855469, \"Time in s\": 179.059934 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.899119295436349, \"F1\": 0.8866906474820143, \"Memory in Mb\": 4.319509506225586, \"Time in s\": 185.738258 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2471084594726562, \"Time in s\": 2.767854 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2488327026367187, \"Time in s\": 8.302014 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2500534057617187, \"Time in s\": 16.382949 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2495498657226562, \"Time in s\": 26.80983 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2495498657226562, \"Time in s\": 39.580928 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, 
\"Memory in Mb\": 0.2512741088867187, \"Time in s\": 54.705622 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2512741088867187, \"Time in s\": 72.18382 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.6267004013061523, \"Time in s\": 92.151846 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496554945696, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6147451400756836, \"Time in s\": 114.903201 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999684691786221, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6187658309936523, \"Time in s\": 140.427577 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133575386968, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6200857162475586, \"Time in s\": 168.711887 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737245456536, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6196355819702148, \"Time in s\": 199.762171 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574581615328, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.619715690612793, \"Time in s\": 233.587883 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737247100334, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.656519889831543, \"Time in s\": 270.202649 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999754764573991, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6566305160522461, \"Time in s\": 309.590328 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999770092291523, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6724729537963867, \"Time in s\": 351.755182 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997836166924264, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6681547164916992, \"Time in s\": 396.69715 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997956383382476, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.669642448425293, \"Time in s\": 444.422506 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998063945126672, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6692113876342773, \"Time in s\": 494.922369 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998160750413831, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6613035202026367, \"Time in s\": 548.2094599999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248335919124, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6618070602416992, \"Time in s\": 604.285405 }, { \"step\": 41866, 
\"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998327958915562, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6630735397338867, \"Time in s\": 663.137659 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998400658014988, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6625699996948242, \"Time in s\": 724.773781 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999846729872348, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6625699996948242, \"Time in s\": 789.184255 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998528608063229, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7120122909545898, \"Time in s\": 856.394593 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99985852012046, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7120351791381836, \"Time in s\": 926.380828 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998442973919812, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.741633415222168, \"Time in s\": 999.155992 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498583037742, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7417364120483398, \"Time in s\": 1074.696379 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998550356974596, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7564992904663086, \"Time in s\": 1153.024047 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997022193417296, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0212621688842771, \"Time in s\": 1234.302477 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118253322484, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0188016891479492, \"Time in s\": 1318.479479 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208309385008, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0275907516479492, \"Time in s\": 1405.549891 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996974425937132, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.2421979904174805, \"Time in s\": 1495.555588 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063414784934, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.258589744567871, \"Time in s\": 1588.500286 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999714731847937, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.266993522644043, \"Time in s\": 1684.370287 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997226560789408, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3167448043823242, \"Time in s\": 1783.171155 }, { \"step\": 70411, \"track\": \"Binary classification\", 
\"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997301519670502, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3264188766479492, \"Time in s\": 1884.908958 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372533292768, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3260221481323242, \"Time in s\": 1989.570925 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997439905141748, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.342616081237793, \"Time in s\": 2097.157749 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997503908354024, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3624944686889648, \"Time in s\": 2207.674598 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756478941837, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.357996940612793, \"Time in s\": 2321.128083 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762277134814, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3575201034545898, \"Time in s\": 2437.510157 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997678056411008, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3575468063354492, \"Time in s\": 2556.8217050000003 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997730828486464, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3463621139526367, \"Time in s\": 2679.067711 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997781255108952, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3506765365600586, \"Time in s\": 2804.239644 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997829489244549, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.350123405456543, \"Time in s\": 2932.339248 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997763864042932, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.4778623580932615, \"Time in s\": 3063.373685 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781045071872, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.489375114440918, \"Time in s\": 3197.337821 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997855135877142, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.5083913803100586, \"Time in s\": 3334.235023 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898033610444, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.5167646408081057, \"Time in s\": 3474.068956 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7142857142857143, \"F1\": 0.6590909090909091, \"Memory in Mb\": 0.0813856124877929, \"Time in s\": 0.066609 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.7819905213270142, \"F1\": 0.7444444444444445, \"Memory in Mb\": 0.0819120407104492, \"Time in s\": 0.219203 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7949526813880127, \"F1\": 0.7583643122676579, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 0.458077 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.806146572104019, \"F1\": 0.7696629213483147, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 0.7833680000000001 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7977315689981096, \"F1\": 0.7446300715990454, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 1.194727 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7984251968503937, \"F1\": 0.7460317460317459, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 1.6920620000000002 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.805668016194332, \"F1\": 0.75, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 2.274961 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8110979929161747, \"F1\": 0.7597597597597598, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 2.9435640000000003 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8174186778593914, \"F1\": 0.7661290322580646, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 3.697699 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8253068932955618, \"F1\": 0.774114774114774, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 4.5372580000000005 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8266094420600858, \"F1\": 0.7770419426048565, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 5.437299 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8284815106215578, \"F1\": 0.7811244979919679, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 6.385956 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8264342774146696, \"F1\": 0.7764265668849394, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 7.383082 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8287255563047876, \"F1\": 0.7795138888888888, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 8.428656 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.830081812460667, \"F1\": 0.7822580645161291, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 9.522527 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8348082595870207, \"F1\": 0.7881996974281392, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 10.665041 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8367573570238757, \"F1\": 
0.7929577464788733, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 11.856044 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8342947037231253, \"F1\": 0.7931937172774869, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 13.095519 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301043219076006, \"F1\": 0.7901840490797546, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 14.384085 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310523831996225, \"F1\": 0.7935409457900807, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 15.721188 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301123595505618, \"F1\": 0.792535675082327, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 17.109526000000002 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301158301158301, \"F1\": 0.792887029288703, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 18.546805000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297086581862946, \"F1\": 0.7921882824236354, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 20.032509 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297286669288242, \"F1\": 0.7933174224343675, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 21.566602 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8289920724801813, \"F1\": 0.7932450935645824, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 23.149486 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8294010889292196, \"F1\": 0.7942206654991244, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 24.780488 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8304788535477106, \"F1\": 0.7949260042283298, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 26.460055 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8308055274688237, \"F1\": 0.7944307944307943, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 28.187982 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8285063455906281, \"F1\": 0.7924379677038204, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 29.964311 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8307643913180245, \"F1\": 0.7941851568477429, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 31.788844 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310502283105022, \"F1\": 0.7939101373932418, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 33.661829 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8307283987024476, \"F1\": 0.7948534667619728, \"Memory in Mb\": 
0.0820188522338867, \"Time in s\": 35.582957 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301401201029454, \"F1\": 0.7933194154488518, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 37.553729 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8320843741326672, \"F1\": 0.7952622673434856, \"Memory in Mb\": 0.097848892211914, \"Time in s\": 39.573419 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8306821245618765, \"F1\": 0.7939632545931758, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 41.642476 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8311926605504587, \"F1\": 0.794904458598726, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 43.760553 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.831165519000255, \"F1\": 0.7945375543140905, \"Memory in Mb\": 0.1073513031005859, \"Time in s\": 45.928839 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301465110504097, \"F1\": 0.7932285368802902, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 48.146786 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8296636825550447, \"F1\": 0.7929411764705883, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 50.413841 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310922387355508, \"F1\": 0.7955454026270702, \"Memory in Mb\": 0.1135158538818359, \"Time in s\": 52.731009 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8317606444188723, \"F1\": 0.7965488449763429, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 55.097904 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8312738710402157, \"F1\": 0.7955349850258643, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 57.514886 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8294930875576036, \"F1\": 0.7937350676931244, \"Memory in Mb\": 0.1135158538818359, \"Time in s\": 59.981717 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8286510829937809, \"F1\": 0.7933798810447376, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 62.498268 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8280561962675613, \"F1\": 0.7922998986828774, \"Memory in Mb\": 0.1229228973388671, \"Time in s\": 65.06543599999999 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8274871794871795, \"F1\": 0.7908480477493159, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 67.68219599999999 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8285484842401124, \"F1\": 0.7928190198932558, \"Memory in Mb\": 0.1224651336669921, \"Time in s\": 
70.34849499999999 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8287792412030667, \"F1\": 0.7930624851508671, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 73.06406199999998 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297708453687657, \"F1\": 0.7943229409027454, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 75.82921599999997 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301566333270428, \"F1\": 0.7949886104783599, \"Memory in Mb\": 0.1224651336669921, \"Time in s\": 78.64422599999997 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8806629834254144, \"F1\": 0.8820960698689956, \"Memory in Mb\": 0.2990808486938476, \"Time in s\": 0.533765 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8901159580342353, \"F1\": 0.8654496281271129, \"Memory in Mb\": 0.3327569961547851, \"Time in s\": 1.6121 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8884799411115201, \"F1\": 0.8614540466392318, \"Memory in Mb\": 0.3573274612426758, \"Time in s\": 3.262924 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8948385316036434, \"F1\": 0.8697435897435898, \"Memory in Mb\": 0.3568239212036133, \"Time in s\": 5.47711 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8922499448001766, \"F1\": 0.8587145338737695, \"Memory in Mb\": 0.3567705154418945, \"Time in s\": 8.248427 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833486660533578, \"F1\": 0.846116504854369, \"Memory in Mb\": 0.3567705154418945, \"Time in s\": 11.585141 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836145718340955, \"F1\": 0.8482730263157895, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 15.487845 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8802263005381538, \"F1\": 0.8418367346938775, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 19.950209 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8828652029927634, \"F1\": 0.8526461965746027, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 24.978187 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8872944033557788, \"F1\": 0.8620456695041211, \"Memory in Mb\": 0.4238061904907226, \"Time in s\": 30.568141 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887104867034621, \"F1\": 0.8668187822745287, \"Memory in Mb\": 0.4239206314086914, \"Time in s\": 36.726803 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8916383037439058, \"F1\": 0.8724003466204506, \"Memory in Mb\": 0.4239206314086914, \"Time in s\": 43.450649000000006 }, { \"step\": 11778, \"track\": \"Binary 
classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8908890209730831, \"F1\": 0.871229582122457, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 50.737672 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8903256327367343, \"F1\": 0.8711917770163904, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 58.59177 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8921186253587461, \"F1\": 0.8751702997275205, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 67.02087300000001 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8925146602276647, \"F1\": 0.8765842839036756, \"Memory in Mb\": 0.5096101760864258, \"Time in s\": 76.01746500000002 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8924095837932602, \"F1\": 0.875628612174435, \"Memory in Mb\": 0.5091333389282227, \"Time in s\": 85.58361400000001 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8862451707855522, \"F1\": 0.8668437298112124, \"Memory in Mb\": 0.5348939895629883, \"Time in s\": 95.717046 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882646836693197, \"F1\": 0.8594880356149137, \"Memory in Mb\": 0.5348939895629883, \"Time in s\": 106.420525 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882664606214471, \"F1\": 0.8596143687268886, \"Memory in Mb\": 0.5429277420043945, \"Time in s\": 117.69460800000002 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833114323258869, \"F1\": 0.858634742740703, \"Memory in Mb\": 0.5442209243774414, \"Time in s\": 129.542011 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8807385479905675, \"F1\": 0.8562095457020145, \"Memory in Mb\": 0.6346635818481445, \"Time in s\": 141.970262 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784853865719633, \"F1\": 0.8535739070090216, \"Memory in Mb\": 0.6924257278442383, \"Time in s\": 154.986156 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8788115715402658, \"F1\": 0.8517414055027289, \"Memory in Mb\": 0.7508554458618164, \"Time in s\": 168.584532 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8773014261115281, \"F1\": 0.8483988871310894, \"Memory in Mb\": 0.7274637222290039, \"Time in s\": 182.766068 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8728507747824241, \"F1\": 0.8417102690132656, \"Memory in Mb\": 0.7538461685180664, \"Time in s\": 197.535569 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8720820898573239, \"F1\": 0.8400224960376297, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 212.893707 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Voting\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8691607206212796, \"F1\": 0.8364944085915562, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 228.841108 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8692954744414417, \"F1\": 0.8360233024543979, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 245.378297 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8690901063320946, \"F1\": 0.8360217531569729, \"Memory in Mb\": 0.7534799575805664, \"Time in s\": 262.499248 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8664055545664946, \"F1\": 0.8316280739544067, \"Memory in Mb\": 0.7535486221313477, \"Time in s\": 280.20757899999995 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8639232865372012, \"F1\": 0.826859776168532, \"Memory in Mb\": 0.7540521621704102, \"Time in s\": 298.510007 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.86353145800582, \"F1\": 0.8261017816042963, \"Memory in Mb\": 0.8122949600219727, \"Time in s\": 317.403419 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8633899295523163, \"F1\": 0.8248272416951128, \"Memory in Mb\": 0.8122949600219727, \"Time in s\": 336.895126 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8610804503453279, \"F1\": 0.8213199204964914, \"Memory in Mb\": 0.8808259963989258, \"Time in s\": 356.985514 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8589299402115591, \"F1\": 0.8183648493940232, \"Memory in Mb\": 0.9061365127563475, \"Time in s\": 377.673556 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8585961039348469, \"F1\": 0.8181399631675875, \"Memory in Mb\": 0.9644479751586914, \"Time in s\": 398.957872 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8569436779272083, \"F1\": 0.8155499794015206, \"Memory in Mb\": 0.989060401916504, \"Time in s\": 420.833858 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8570741233407863, \"F1\": 0.8144610184436769, \"Memory in Mb\": 1.055558204650879, \"Time in s\": 443.304179 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8577223433317697, \"F1\": 0.8140239503679124, \"Memory in Mb\": 1.0826387405395508, \"Time in s\": 466.364607 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8573966886525778, \"F1\": 0.812978851110405, \"Memory in Mb\": 1.1978578567504885, \"Time in s\": 490.019084 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8572967858926178, \"F1\": 0.8125388386384037, \"Memory in Mb\": 1.198460578918457, \"Time in s\": 514.261258 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8578432630849399, 
\"F1\": 0.8141610738255034, \"Memory in Mb\": 1.198460578918457, \"Time in s\": 539.085152 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8585906730552141, \"F1\": 0.816998344317112, \"Memory in Mb\": 1.232090950012207, \"Time in s\": 564.495213 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8591822217861611, \"F1\": 0.8196298972635019, \"Memory in Mb\": 1.2325944900512695, \"Time in s\": 590.495221 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.859868026394721, \"F1\": 0.8219620754832023, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 617.0763880000001 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8602663159625185, \"F1\": 0.82283230109576, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 644.2465100000001 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8601651068135305, \"F1\": 0.8228301721877459, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 671.9994270000001 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8584912035681301, \"F1\": 0.8195968066165068, \"Memory in Mb\": 1.2565431594848633, \"Time in s\": 700.3413830000001 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8588710567562198, \"F1\": 0.8202547305086175, \"Memory in Mb\": 1.3147859573364258, \"Time in s\": 729.273758 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.1548337936401367, \"Time in s\": 0.027523 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7636363636363637, \"Memory in Mb\": 0.1710462570190429, \"Time in s\": 0.082708 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.8048780487804877, \"Memory in Mb\": 0.1877622604370117, \"Time in s\": 0.17051 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.819047619047619, \"Memory in Mb\": 0.2039480209350586, \"Time in s\": 0.296263 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8217054263565893, \"Memory in Mb\": 0.2043638229370117, \"Time in s\": 0.463136 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.830188679245283, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 0.670871 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8390804597701148, \"Memory in Mb\": 0.2043638229370117, \"Time in s\": 0.919601 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8391959798994975, \"F1\": 0.8383838383838383, \"Memory in Mb\": 
0.2048635482788086, \"Time in s\": 1.21153 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8348214285714286, \"F1\": 0.8294930875576038, \"Memory in Mb\": 0.2048902511596679, \"Time in s\": 1.544633 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8353413654618473, \"F1\": 0.8298755186721991, \"Memory in Mb\": 0.2043867111206054, \"Time in s\": 1.91889 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8357664233576643, \"F1\": 0.8288973384030419, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 2.334116 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8285714285714285, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 2.790538 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8487654320987654, \"F1\": 0.8338983050847458, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 3.287923 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8538681948424068, \"F1\": 0.8360128617363344, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 3.826274 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8318042813455658, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 4.40584 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8313953488372093, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 5.028368 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8291316526610645, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 5.69196 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8310991957104559, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 6.39676 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8586497890295358, \"F1\": 0.8312342569269521, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 7.142548 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8597194388777555, \"F1\": 0.835680751173709, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 7.929462 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606870229007634, \"F1\": 0.8337129840546698, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 8.757545 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8615664845173042, \"F1\": 0.8362068965517241, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 9.62628 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8641114982578397, \"F1\": 0.8388429752066116, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 10.535573 }, { \"step\": 600, \"track\": 
\"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8664440734557596, \"F1\": 0.8387096774193549, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 11.487542 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8362919132149902, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 12.480171000000002 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 0.8432835820895523, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 13.513483000000004 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8724035608308606, \"F1\": 0.8485915492957745, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 14.588001000000002 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.876967095851216, \"F1\": 0.8522336769759451, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 15.703553000000005 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8562091503267973, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 16.863107000000003 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785046728971962, \"F1\": 0.8571428571428572, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 18.064417 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785529715762274, \"F1\": 0.8567073170731707, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 19.306597000000004 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785982478097623, \"F1\": 0.8583941605839417, \"Memory in Mb\": 0.1428241729736328, \"Time in s\": 20.591420000000003 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786407766990292, \"F1\": 0.8595505617977528, \"Memory in Mb\": 0.2701892852783203, \"Time in s\": 21.920167000000003 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8602739726027396, \"Memory in Mb\": 0.2707157135009765, \"Time in s\": 23.289893000000003 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832951945080092, \"F1\": 0.8636363636363635, \"Memory in Mb\": 0.270212173461914, \"Time in s\": 24.700235000000003 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882091212458287, \"F1\": 0.8619791666666667, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 26.151721 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8841991341991342, \"F1\": 0.8657465495608533, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 27.644342 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8840885142255005, \"F1\": 0.8671497584541062, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 
29.178064000000003 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8860369609856262, \"F1\": 0.8692579505300354, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 30.753129 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8868868868868869, \"F1\": 0.870264064293915, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 32.369334 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88671875, \"F1\": 0.8705357142857143, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 34.02633 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888465204957102, \"F1\": 0.8729641693811074, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 35.724225000000004 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873370577281192, \"F1\": 0.8727655099894847, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 37.462729 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8889899909008189, \"F1\": 0.8747433264887065, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 39.24172900000001 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8905693950177936, \"F1\": 0.8776119402985074, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 41.061817000000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8912097476066144, \"F1\": 0.8780487804878049, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 42.922291 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901192504258943, \"F1\": 0.8765550239234451, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 44.822872 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8915763135946623, \"F1\": 0.8778195488721804, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 46.765498 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8913398692810458, \"F1\": 0.8774193548387096, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 48.748369 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8903122497998399, \"F1\": 0.8769092542677449, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 50.77338400000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 1.484811 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 4.457186 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 8.746386000000001 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": 
\"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 13.887875 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 19.865436000000003 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 26.640571 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 34.167751 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029297773108, \"F1\": 0.8421052631578948, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 42.434428 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248277472848, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 51.484422 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423458931104, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1113691329956054, \"Time in s\": 61.31819 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566787693484, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1113691329956054, \"Time in s\": 71.93436700000001 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999868622728268, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 83.32890400000001 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787290807664, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 95.504963 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998873916144289, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 108.464893 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999894899103139, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 122.206969 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999014681249384, \"F1\": 0.88, \"Memory in Mb\": 0.1108884811401367, \"Time in s\": 136.730455 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999072642967544, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 152.048808 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124164306776, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 168.149805 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999170262197146, \"F1\": 0.88, \"Memory in Mb\": 0.1108884811401367, \"Time in s\": 185.035032 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9999211750177356, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 202.707805 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999924928682248, \"F1\": 0.88, \"Memory in Mb\": 0.1033926010131836, \"Time in s\": 221.166297 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999283410963812, \"F1\": 0.88, \"Memory in Mb\": 0.1033926010131836, \"Time in s\": 240.410098 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999931456772071, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 260.439908 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343128024348, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 281.255775 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999369403455668, \"F1\": 0.88, \"Memory in Mb\": 0.1155691146850586, \"Time in s\": 302.85620600000004 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999393657659116, \"F1\": 0.88, \"Memory in Mb\": 0.1155691146850586, \"Time in s\": 325.23865100000006 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999221486959906, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1243124008178711, \"Time in s\": 348.4004960000001 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249291518872, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1243124008178711, \"Time in s\": 372.3442280000001 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999275178487298, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1248159408569336, \"Time in s\": 397.0676850000001 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998073183975896, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1517705917358398, \"Time in s\": 422.5782570000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998135340385136, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1512670516967773, \"Time in s\": 448.87363900000014 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998193611955004, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1517705917358398, \"Time in s\": 475.95484300000015 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997929870378036, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 503.8239590000002 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997990757484428, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 532.4818360000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998048165275358, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 
561.9301430000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998102383698017, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 592.1790100000002 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99981536713535, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 623.2264690000002 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998202259621368, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 655.0760590000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998113614314972, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 687.7245370000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999816077457665, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 721.1718600000002 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998205634308271, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 755.4176530000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248357835472, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 790.4648560000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998289094197584, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 826.3125100000002 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998327978884762, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 862.9590210000002 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999836513534344, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1653127670288086, \"Time in s\": 900.4018990000002 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998400676285456, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1648092269897461, \"Time in s\": 938.635169 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997875670840788, \"F1\": 0.6415094339622641, \"Memory in Mb\": 0.1745138168334961, \"Time in s\": 977.660251 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781045071872, \"F1\": 0.6296296296296297, \"Memory in Mb\": 0.1745138168334961, \"Time in s\": 1017.476761 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996782703815712, \"F1\": 0.53125, \"Memory in Mb\": 0.1740102767944336, \"Time in s\": 1058.085363 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847050415664, \"F1\": 0.53125, \"Memory in Mb\": 0.1740102767944336, \"Time in s\": 1099.4855200000002 }, 
{ \"step\": 106, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5333333333333333, \"F1\": 0.5242718446601942, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.003183 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5876777251184834, \"F1\": 0.5538461538461539, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.008041 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5457413249211357, \"F1\": 0.5102040816326531, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0146489999999999 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.5025906735751295, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0229629999999999 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5671077504725898, \"F1\": 0.5096359743040686, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.032934 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5464566929133858, \"F1\": 0.4875444839857651, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.044653 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5573549257759784, \"F1\": 0.4875, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.058023 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5501770956316411, \"F1\": 0.4816326530612245, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.073029 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487932843651626, \"F1\": 0.4794188861985472, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.08975 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5448536355051936, \"F1\": 0.4679911699779249, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.108101 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.534763948497854, \"F1\": 0.4590818363273453, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.128117 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5287175452399685, \"F1\": 0.456935630099728, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.149836 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5286855482933914, \"F1\": 0.4523206751054852, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.173181 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5252865812542145, \"F1\": 0.4491392801251955, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.19815 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", 
\"dataset\": \"Bananas\", \"Accuracy\": 0.5204531151667715, \"F1\": 0.4437956204379563, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.224847 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5227138643067847, \"F1\": 0.4455106237148732, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.253189 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.524153248195447, \"F1\": 0.4523961661341854, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.283166 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5233350812794966, \"F1\": 0.456664674237896, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3148919999999999 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5171385991058122, \"F1\": 0.4563758389261745, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3482999999999999 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5143935818782445, \"F1\": 0.4581358609794628, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3833669999999999 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5114606741573033, \"F1\": 0.4545910687405921, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4201549999999999 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510939510939511, \"F1\": 0.4550669216061185, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4585819999999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5104636848584325, \"F1\": 0.4530032095369097, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4986469999999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5084545812033032, \"F1\": 0.4546247818499127, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.5404629999999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5096262740656852, \"F1\": 0.458072590738423, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.5839479999999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5092558983666061, \"F1\": 0.4574638844301765, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.6291049999999999 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5103110800419434, \"F1\": 0.4563445867287544, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.6760149999999999 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5133131108864173, \"F1\": 0.457957957957958, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7245799999999999 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"[baseline] Last 
Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099251545720794, \"F1\": 0.4563176895306859, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7747889999999998 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102233406731677, \"F1\": 0.4538758330410382, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.8267639999999998 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5095890410958904, \"F1\": 0.4522271336280176, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.8803849999999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5107637864936597, \"F1\": 0.4558871761233191, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.9357289999999998 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5124392336288247, \"F1\": 0.4557931694861155, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.9927189999999998 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5134610047182903, \"F1\": 0.4544039838157485, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.0513469999999998 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5122674575357239, \"F1\": 0.4546276756104914, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1117049999999995 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510615989515072, \"F1\": 0.4536142815335089, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1737269999999995 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5090538128028564, \"F1\": 0.4507845934379457, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.2373829999999997 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5108020859200397, \"F1\": 0.452473596442468, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.3028159999999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102830873457537, \"F1\": 0.4517876489707476, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.3698969999999997 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102618542108988, \"F1\": 0.4525316455696203, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.4386049999999997 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5074798619102416, \"F1\": 0.4490216271884655, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5090079999999997 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099977533138621, \"F1\": 0.4513207547169811, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5810569999999995 }, { \"step\": 4558, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099846390168971, \"F1\": 0.4539007092198581, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.6547379999999996 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099721209521767, \"F1\": 0.4553039332538737, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.7301709999999997 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5110085971901867, \"F1\": 0.4556489262371615, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.807246 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5109743589743589, \"F1\": 0.4539624370132845, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.885955 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099377635013049, \"F1\": 0.453792794808682, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.9663889999999995 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099272655789266, \"F1\": 0.4536489151873767, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.0484569999999995 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5097246293086848, \"F1\": 0.4531786941580756, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.1321769999999995 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5095301000188714, \"F1\": 0.4529572721532309, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.2176369999999994 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530386740331491, \"F1\": 0.8500563697857948, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.021647 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8619547211485368, \"F1\": 0.8287671232876712, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.064308 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8450496871549503, \"F1\": 0.80958842152872, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.127937 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8418437758763456, \"F1\": 0.8056968463886063, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.212647 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8388165157871494, \"F1\": 0.7960893854748604, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.318513 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8413983440662374, \"F1\": 0.7995348837209302, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.445933 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"[baseline] Last 
Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8370919413341744, \"F1\": 0.7958094485076103, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.594503 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8359321098385539, \"F1\": 0.7948231233822259, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.763985 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8352753587636453, \"F1\": 0.8021799970540581, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.954384 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8358538470029805, \"F1\": 0.8069081937410726, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.166026 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8372303060712494, \"F1\": 0.8118765947575969, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.399073 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8368135406126391, \"F1\": 0.8140461215932915, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.653093 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8374798335739153, \"F1\": 0.8150724637681159, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.928122 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8384451628163684, \"F1\": 0.8161177420802298, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.224333 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.842004562513798, \"F1\": 0.8223417459660736, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.541433 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8448430493273542, \"F1\": 0.8264794383149447, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.879854 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8460489578598792, \"F1\": 0.8270983738058776, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.239289 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.844851904090268, \"F1\": 0.8251313243019076, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.619613 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443618195549875, \"F1\": 0.8222177981286084, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.020822 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8450797505381091, \"F1\": 0.8227792158595871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.443085 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8462023653088042, \"F1\": 0.8224083515416363, \"Memory in Mb\": 
0.0005102157592773, \"Time in s\": 4.886686 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.847523957653906, \"F1\": 0.8255753888538139, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.35136 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.84661899505687, \"F1\": 0.8249917862227577, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.836996 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8452835395299637, \"F1\": 0.8209495422610177, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.343734 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8444081416398075, \"F1\": 0.8188733552631579, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.871264 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8451284228401613, \"F1\": 0.8194595664654062, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.419847 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8464903315481788, \"F1\": 0.8198781599270878, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.989367 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8462963692986951, \"F1\": 0.8199492034172247, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 8.579944 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8477524454763445, \"F1\": 0.8213168944876262, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.191269 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8495529636851982, \"F1\": 0.8240457851026293, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.823483 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8509880719245149, \"F1\": 0.825107610012955, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 10.476909 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8521265220240765, \"F1\": 0.8258237516759436, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.151512 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8531959728400843, \"F1\": 0.8268160833366216, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.847222 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8537480115573158, \"F1\": 0.8267107743201139, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 12.564065 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530385694913116, \"F1\": 0.8259895444361464, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 13.301932 }, { \"step\": 32616, \"track\": \"Binary classification\", 
\"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8536869538555879, \"F1\": 0.8269760696156635, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.060873999999998 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8541511291429253, \"F1\": 0.8276032300151628, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.840823 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8549684840386905, \"F1\": 0.8286724084685859, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 15.641785 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8555175048821215, \"F1\": 0.8284321962695346, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 16.463894999999997 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8545213720025387, \"F1\": 0.8259146744155329, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 17.307088999999998 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.854354556467896, \"F1\": 0.8252696854208386, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 18.171425 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8545636119944285, \"F1\": 0.8247736052181622, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.056747 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8548142824139435, \"F1\": 0.8254213223038459, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.962946 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8546521837292728, \"F1\": 0.8262981172802495, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 20.890172 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8540067207927592, \"F1\": 0.8267652366261132, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 21.838151 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8537012597480504, \"F1\": 0.8274320002264302, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 22.807005 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8536201592259458, \"F1\": 0.8277177368086459, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 23.796527 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.853473451836181, \"F1\": 0.8276626818845675, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 24.806653 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8533777847858897, \"F1\": 0.8271686890948196, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 25.837451 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8533521711296055, \"F1\": 0.8273155007928462, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 26.888684 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.625, \"F1\": 0.64, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.002343 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6530612244897959, \"F1\": 0.6222222222222223, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.006017 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5675675675675675, \"F1\": 0.5555555555555556, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0109819999999999 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5555555555555556, \"F1\": 0.5416666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.017228 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5241935483870968, \"F1\": 0.5123966942148761, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.024779 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5234899328859061, \"F1\": 0.5298013245033113, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.033621 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5229885057471264, \"F1\": 0.496969696969697, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.043754 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.507537688442211, \"F1\": 0.4787234042553192, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.055183 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5, \"F1\": 0.4509803921568627, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.067905 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5180722891566265, \"F1\": 0.4782608695652174, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.082 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5218978102189781, \"F1\": 0.4738955823293172, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.097411 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5217391304347826, \"F1\": 0.460377358490566, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.114126 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5216049382716049, \"F1\": 0.4483985765124554, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.132158 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5329512893982808, \"F1\": 0.4511784511784511, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.151512 }, { 
\"step\": 375, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5267379679144385, \"F1\": 0.4380952380952381, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.172161 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5263157894736842, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.194113 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5424528301886793, \"F1\": 0.436046511627907, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.217363 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5367483296213809, \"F1\": 0.4222222222222222, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.241916 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5358649789029536, \"F1\": 0.4329896907216494, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.267769 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5370741482965932, \"F1\": 0.4460431654676259, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.2950149999999999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5400763358778626, \"F1\": 0.4382284382284382, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.323563 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5391621129326047, \"F1\": 0.4415011037527593, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3534009999999999 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5418118466898955, \"F1\": 0.4416135881104034, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.384549 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5509181969949917, \"F1\": 0.443064182194617, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4169989999999999 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5560897435897436, \"F1\": 0.4358452138492871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4507429999999999 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.551617873651772, \"F1\": 0.4393063583815029, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4857869999999999 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5459940652818991, \"F1\": 0.4436363636363636, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.522164 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5464949928469242, \"F1\": 0.4389380530973452, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.55984 }, { \"step\": 725, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5441988950276243, \"F1\": 0.4463087248322148, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.598852 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5367156208277704, \"F1\": 0.4412238325281803, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.639119 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5310077519379846, \"F1\": 0.4336973478939157, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.680612 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5294117647058824, \"F1\": 0.4388059701492537, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.723331 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5266990291262136, \"F1\": 0.4396551724137931, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7672859999999999 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5241460541813898, \"F1\": 0.4341736694677871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.812452 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.522883295194508, \"F1\": 0.4311050477489768, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.858842 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5272525027808677, \"F1\": 0.4340878828229028, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.906455 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5227272727272727, \"F1\": 0.4338896020539153, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.955289 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5205479452054794, \"F1\": 0.438964241676942, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.005349 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5174537987679672, \"F1\": 0.4337349397590361, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.056718 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5185185185185185, \"F1\": 0.4361078546307151, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.109312 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.517578125, \"F1\": 0.4386363636363636, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1631 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5138226882745471, \"F1\": 0.4370860927152318, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.218179 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.5111731843575419, \"F1\": 0.4372990353697749, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.274568 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5122838944494995, \"F1\": 0.4393305439330544, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.332245 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5124555160142349, \"F1\": 0.4453441295546558, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.391212 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5143603133159269, \"F1\": 0.4464285714285714, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.451489 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5187393526405452, \"F1\": 0.4509232264334305, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.513052 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5187656380316931, \"F1\": 0.448901623686724, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5759610000000002 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5171568627450981, \"F1\": 0.4471468662301216, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.640178 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5156124899919936, \"F1\": 0.4474885844748858, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.705799 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.070354 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.210001 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.417744 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.694871 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.039777 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.454499 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.938249 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9985548183669448, \"F1\": 0.0, \"Memory in Mb\": 
0.0005102157592773, \"Time in s\": 2.491975 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9984818404764684, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.11529 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9986336644069578, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.807739 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9987578826676858, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.570752 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9988613969783228, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.403311 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9989489853666425, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.304936 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9989489884013364, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.275472 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9990190582959642, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 8.315878999999999 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999080369166092, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.425081 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9991344667697064, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 10.602661 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999182553352991, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.849262 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992255780506694, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 13.164257 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992643001655324, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.54421 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993343676492, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 15.991924 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993311835662247, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 17.508838 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993602632059952, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.096664 }, { \"step\": 45672, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869194893916, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 20.752142 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994114432252912, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 22.476952 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99943408048184, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 24.271157 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99941611521993, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 26.132842000000004 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994369686391532, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 28.063918000000005 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994563838654732, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 30.065042000000005 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994394717020793, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 32.13415200000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994575535665852, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 34.27360200000001 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745052960012, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 36.48320000000001 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994585814834868, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 38.76173800000001 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745058036196, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 41.10958900000001 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99948952014894, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 43.52762700000001 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745062548352, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 46.01474500000001 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994887089902004, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 48.570884000000014 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995021642028404, \"F1\": 0.3793103448275862, 
\"Memory in Mb\": 0.0005102157592773, \"Time in s\": 51.19747000000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149293952786, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 53.894680000000015 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99952705631971, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 56.660361000000016 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99953859167927, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 59.49662100000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549577729121, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 62.40230700000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995600527936648, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 65.37601400000003 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995700517132244, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 68.41937500000003 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995796062311698, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 71.53305000000003 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999588745330546, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 74.71604400000002 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995751341681576, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 77.96763000000003 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995839856365568, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 81.28881300000003 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999592475816657, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 84.68057600000003 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999600626385984, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 88.14010200000003 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"Bananas\", \"bind\": { \"input\": \"select\", \"options\": [ \"Bananas\", \"Elec2\", \"Phishing\", \"SMTP\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": 
\"Dataset\" } } } ], \"repeat\": { \"row\": [ \"Accuracy\", \"F1\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, \"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Binary%20classification/#datasets","title":"Datasets","text":"Bananas

    Bananas dataset.

    An artificial dataset where instances belong to several clusters with a banana shape. There are two attributes that correspond to the x and y axes, respectively.

    Name  Bananas\nTask  Binary classification\n

    Samples 5,300\nFeatures 2\nSparse False\nPath /home/kulbach/projects/river/river/datasets/banana.zip

    Elec2

    Electricity prices in New South Wales.

    This is a binary classification task: the goal is to predict whether the price of electricity will go up or down.

    This data was collected from the Australian New South Wales Electricity Market. In this market, prices are not fixed: they are set every five minutes and are driven by supply and demand. Electricity transfers to/from the neighboring state of Victoria were used to alleviate fluctuations.

    Name  Elec2\nTask  Binary classification\n

    Samples 45,312\nFeatures 8\nSparse False\nPath /home/kulbach/river_data/Elec2/electricity.csv\nURL https://maxhalford.github.io/files/datasets/electricity.zip\nSize 2.95 MB\nDownloaded True

    Phishing

    Phishing websites.

    This dataset contains features from web pages that are classified as phishing or not.

    Name  Phishing\nTask  Binary classification\n

    Samples 1,250\nFeatures 9\nSparse False\nPath /home/kulbach/projects/river/river/datasets/phishing.csv.gz

    SMTP

    SMTP dataset from the KDD 1999 cup.

    The goal is to predict whether an SMTP connection is anomalous. The dataset contains only 2,211 (0.4%) positive labels.

    Name  SMTP\nTask  Binary classification\n

    Samples 95,156\nFeatures 3\nSparse False\nPath /home/kulbach/river_data/SMTP/smtp.csv\nURL https://maxhalford.github.io/files/datasets/smtp.zip\nSize 5.23 MB\nDownloaded True
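
    Any of these datasets can be replayed as a stream with river itself. The following is a minimal sketch of the test-then-train protocol behind the curves above; the model and metric choices here are illustrative, not the exact benchmark harness:

    from river import datasets, evaluate, linear_model, metrics, preprocessing

    # Progressive validation: every sample is predicted on first, then used
    # for training, so the running metric reflects out-of-sample error.
    model = preprocessing.StandardScaler() | linear_model.LogisticRegression()
    metric = metrics.Accuracy()
    evaluate.progressive_val_score(datasets.Phishing(), model, metric)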

    "},{"location":"benchmarks/Binary%20classification/#models","title":"Models","text":"Logistic regression

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LogisticRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.005\n      )\n    )\n    loss=Log (\n      weight_pos=1.\n      weight_neg=1.\n    )\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)
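
    The printout above maps directly onto river code; a minimal sketch, keeping only the non-default value shown (the SGD learning rate) and leaving the rest at the defaults:

    from river import linear_model, optim, preprocessing

    # Standardize features, then fit logistic regression with SGD
    # at a constant learning rate of 0.005 (the lr shown above).
    model = preprocessing.StandardScaler() | linear_model.LogisticRegression(
        optimizer=optim.SGD(lr=0.005)
    )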

    ALMA

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  ALMAClassifier (\n    p=2\n    alpha=0.9\n    B=1.111111\n    C=1.414214\n  )\n)

    sklearn SGDClassifier

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SKL2RiverClassifier (\n    estimator=SGDClassifier(eta0=0.005, learning_rate='constant', loss='log', penalty='none')\n    classes=[False, True]\n  )\n)
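
    SKL2RiverClassifier is the wrapper river's compat module produces around an incremental scikit-learn estimator. A sketch of the equivalent construction, with arguments mirroring the printout (note that recent scikit-learn releases spell the loss "log_loss" and take penalty=None instead of "none"):

    from river import compat, preprocessing
    from sklearn import linear_model

    # A partial_fit-capable sklearn estimator, adapted to river's learn_one API.
    model = preprocessing.StandardScaler() | compat.convert_sklearn_to_river(
        linear_model.SGDClassifier(
            loss="log", penalty="none", learning_rate="constant", eta0=0.005
        ),
        classes=[False, True],
    )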

    Vowpal Wabbit logistic regression

    VW2RiverClassifier ()

    Naive Bayes

    GaussianNB ()

    Hoeffding Tree

    HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)
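
    These are river's default Hoeffding tree hyperparameters; a minimal sketch spelling out the ones that drive splitting behaviour:

    from river import tree

    # The tree attempts a split every `grace_period` samples and commits one
    # once the Hoeffding bound (tightness set by `delta`, ties broken by `tau`)
    # separates the two best candidates; "nba" leaves use naive Bayes adaptive
    # prediction.
    model = tree.HoeffdingTreeClassifier(
        grace_period=200,
        delta=1e-7,
        tau=0.05,
        leaf_prediction="nba",
    )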

    Hoeffding Adaptive Tree

    HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=True\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=42\n)

    Adaptive Random Forest

    []
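
    The configuration printout is empty here; for reference, a minimal sketch of building river's adaptive random forest (assuming the forest module of river 0.19; the seed is an arbitrary example value):

    from river import forest

    # 10 Hoeffding trees, each trained on a random feature subset with
    # online bagging, plus per-tree drift and warning detectors.
    model = forest.ARFClassifier(n_models=10, seed=42)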

    Streaming Random Patches

    SRPClassifier (\n  model=HoeffdingTreeClassifier (\n    grace_period=50\n    max_depth=inf\n    split_criterion=\"info_gain\"\n    delta=0.01\n    tau=0.05\n    leaf_prediction=\"nba\"\n    nb_threshold=0\n    nominal_attributes=None\n    splitter=GaussianSplitter (\n      n_splits=10\n    )\n    binary_split=False\n    max_size=100.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n  n_models=10\n  subspace_size=0.6\n  training_method=\"patches\"\n  lam=6\n  drift_detector=ADWIN (\n    delta=1e-05\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  warning_detector=ADWIN (\n    delta=0.0001\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  disable_detector=\"off\"\n  disable_weighted_vote=False\n  seed=None\n  metric=Accuracy (\n    cm=ConfusionMatrix (\n      classes=[]\n    )\n  )\n)
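
    Streaming Random Patches trains every ensemble member on its own random "patch" (a feature subspace plus resampled instances); a sketch with the headline values from the printout:

    from river import ensemble, tree

    model = ensemble.SRPClassifier(
        model=tree.HoeffdingTreeClassifier(grace_period=50, delta=0.01),
        n_models=10,
        subspace_size=0.6,          # each member sees 60% of the features
        training_method="patches",  # resample both instances and features
    )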

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)\n
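
    The k-NN classifier keeps a sliding window of recent samples and votes among the nearest stored neighbours; a minimal sketch, with the window, distance, and vote weighting left at the defaults listed above:

    from river import neighbors, preprocessing

    # Standardizing first matters: k-NN distances are scale-sensitive.
    model = preprocessing.StandardScaler() | neighbors.KNNClassifier(n_neighbors=5)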

    ADWIN Bagging

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
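
    A condensed sketch of the same ensemble (ten default Hoeffding trees under ADWIN-monitored online bagging):

    from river import ensemble, tree

    # Each member gets a Poisson-weighted copy of every sample; ADWIN watches
    # each member's error rate and resets the member when drift is detected.
    model = ensemble.ADWINBaggingClassifier(
        model=tree.HoeffdingTreeClassifier(),
        n_models=10,
    )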

    AdaBoost

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
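
    A condensed sketch of the same boosting ensemble (ten default Hoeffding trees):

    from river import ensemble, tree

    # Online AdaBoost: samples that earlier members misclassify receive
    # higher Poisson weights for the members that follow.
    model = ensemble.AdaBoostClassifier(
        model=tree.HoeffdingTreeClassifier(),
        n_models=10,
    )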

    Bagging

    [HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n  
  clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n)]
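
    A condensed sketch of the same bagging ensemble; the base learner is a Hoeffding adaptive tree with its own bootstrap sampling turned off, since the bagging wrapper already resamples:

    from river import ensemble, tree

    model = ensemble.BaggingClassifier(
        model=tree.HoeffdingAdaptiveTreeClassifier(bootstrap_sampling=False),
        n_models=10,
    )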

    Leveraging Bagging

    [HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), ...] (a list of 10 identical HoeffdingTreeClassifier members; one shown)
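    This ensemble can be recreated through River's public API. A minimal sketch, assuming River 0.19; the tree settings mirror the printout above (they are also the library defaults), while the ensemble's seed is not shown in this dump:

    from river import ensemble, tree

    # Leveraging Bagging over 10 Hoeffding trees, as listed above.
    model = ensemble.LeveragingBaggingClassifier(
        model=tree.HoeffdingTreeClassifier(
            grace_period=200,
            delta=1e-07,
            tau=0.05,
            leaf_prediction="nba",
        ),
        n_models=10,
    )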


    Stacking

    [Pipeline (
      StandardScaler (
        with_std=True
      ),
      SoftmaxRegression (
        optimizer=SGD (
          lr=Constant (
            learning_rate=0.01
          )
        )
        loss=CrossEntropy (
          class_weight={}
        )
        l2=0
      )
    ), GaussianNB (), HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), Pipeline (
      StandardScaler (
        with_std=True
      ),
      KNNClassifier (
        n_neighbors=5
        window_size=100
        min_distance_keep=0.
        weighted=True
        cleanup_every=0
        distance_func=functools.partial(, p=2)
        softmax=False
      )
    )]
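    The same four base learners can be assembled by hand. A sketch under River 0.19 with default hyperparameters (matching the printout above); the meta-classifier is not recorded in this dump, so the LogisticRegression below is an assumption, not the benchmark's actual choice:

    from river import compose, ensemble, linear_model, naive_bayes, neighbors, preprocessing, tree

    # The four base models listed above.
    base_models = [
        compose.Pipeline(preprocessing.StandardScaler(), linear_model.SoftmaxRegression()),
        naive_bayes.GaussianNB(),
        tree.HoeffdingTreeClassifier(),
        compose.Pipeline(preprocessing.StandardScaler(), neighbors.KNNClassifier(n_neighbors=5)),
    ]

    # The meta-classifier is assumed; it is not shown in the dump above.
    model = ensemble.StackingClassifier(
        models=base_models,
        meta_classifier=linear_model.LogisticRegression(),
    )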

    Voting

    VotingClassifier (
      models=[Pipeline (
      StandardScaler (
        with_std=True
      ),
      SoftmaxRegression (
        optimizer=SGD (
          lr=Constant (
            learning_rate=0.01
          )
        )
        loss=CrossEntropy (
          class_weight={}
        )
        l2=0
      )
    ), GaussianNB (), HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), Pipeline (
      StandardScaler (
        with_std=True
      ),
      KNNClassifier (
        n_neighbors=5
        window_size=100
        min_distance_keep=0.
        weighted=True
        cleanup_every=0
        distance_func=functools.partial(, p=2)
        softmax=False
      )
    )]
      use_probabilities=True
    )
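    The voting wrapper combines the same four base learners. A sketch, again assuming River 0.19:

    from river import compose, ensemble, linear_model, naive_bayes, neighbors, preprocessing, tree

    # Same base learners as in the stacking sketch above.
    base_models = [
        compose.Pipeline(preprocessing.StandardScaler(), linear_model.SoftmaxRegression()),
        naive_bayes.GaussianNB(),
        tree.HoeffdingTreeClassifier(),
        compose.Pipeline(preprocessing.StandardScaler(), neighbors.KNNClassifier(n_neighbors=5)),
    ]

    # use_probabilities=True averages the members' predicted probabilities
    # (soft voting) instead of counting hard votes.
    model = ensemble.VotingClassifier(models=base_models, use_probabilities=True)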

    [baseline] Last Class

    NoChangeClassifier ()
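    The baseline takes no configuration, which is why it prints with an empty constructor: it simply replays the last label it observed. The equivalent one-liner:

    from river import dummy

    # Always predicts the most recently seen class; features are ignored.
    model = dummy.NoChangeClassifier()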


    "},{"location":"benchmarks/Binary%20classification/#environment","title":"Environment","text":"
    Python implementation: CPython
    Python version       : 3.11.5
    IPython version      : 8.15.0

    river       : 0.19.0
    numpy       : 1.25.2
    scikit-learn: 1.3.0
    pandas      : 2.1.0
    scipy       : 1.11.2

    Compiler    : GCC 11.4.0
    OS          : Linux
    Release     : 5.15.0-1041-azure
    Machine     : x86_64
    Processor   : x86_64
    CPU cores   : 2
    Architecture: 64bit
    "},{"location":"benchmarks/Multiclass%20classification/","title":"Multiclass classification","text":"TableChart Model Dataset Accuracy MicroF1 MacroF1 Memory in Mb Time in s ADWIN Bagging ImageSegments 0.777729 0.777729 0.764912 4.14768 482.736 ADWIN Bagging Insects 0.579424 0.579424 0.570136 15.4446 12525.9 ADWIN Bagging Keystroke 0.805824 0.805824 0.80625 32.1812 8923.61 AdaBoost ImageSegments 0.805133 0.805133 0.798078 4.12853 438.219 AdaBoost Insects 0.554082 0.554082 0.543927 28.2902 12481.3 AdaBoost Keystroke 0.842492 0.842492 0.843635 177.385 12366.9 Adaptive Random Forest ImageSegments 0.819052 0.819052 0.814425 4.66081 227.541 Adaptive Random Forest Insects 0.744257 0.744257 0.741932 0.369647 4404.71 Adaptive Random Forest Keystroke 0.969851 0.969851 0.969867 2.33717 937.846 Bagging ImageSegments 0.77686 0.77686 0.764461 4.18729 482.036 Bagging Insects 0.606053 0.606053 0.598222 3.75006 14067.2 Bagging Keystroke 0.667974 0.667974 0.668853 50.4872 13509.1 Hoeffding Adaptive Tree ImageSegments 0.774685 0.774685 0.763496 0.425819 53.9974 Hoeffding Adaptive Tree Insects 0.611962 0.611962 0.602993 0.147679 1507.07 Hoeffding Adaptive Tree Keystroke 0.723712 0.723712 0.722393 0.727901 1274.73 Hoeffding Tree ImageSegments 0.77599 0.77599 0.763027 0.419177 39.4879 Hoeffding Tree Insects 0.537018 0.537018 0.527071 2.5392 921.351 Hoeffding Tree Keystroke 0.648218 0.648218 0.647249 5.09806 914.037 Leveraging Bagging ImageSegments 0.778164 0.778164 0.765914 4.13275 1135.16 Leveraging Bagging Insects 0.691547 0.691547 0.686411 18.1413 32334.1 Leveraging Bagging Keystroke 0.95039 0.95039 0.950468 10.4201 7265.02 Naive Bayes ImageSegments 0.731622 0.731622 0.730042 0.390004 38.4724 Naive Bayes Insects 0.506847 0.506847 0.493003 0.611693 557.606 Naive Bayes Keystroke 0.652532 0.652532 0.651577 4.86901 473.747 Stacking ImageSegments 0.849065 0.849065 0.847922 5.29567 399.289 Stacking Insects 0.752154 0.752154 0.750251 11.339 9741.14 Stacking Keystroke 0.976518 0.976518 0.976517 12.2203 4556.33 Streaming Random Patches ImageSegments 0.754676 0.754676 0.752727 10.4257 832.07 Streaming Random Patches Insects 0.739578 0.739578 0.737512 8.34194 26942.3 Streaming Random Patches Keystroke 0.953233 0.953233 0.953239 74.5521 5886.48 Voting ImageSegments 0.803393 0.803393 0.794975 0.951658 146.236 Voting Insects 0.647929 0.647929 0.635943 3.38862 3141.99 Voting Keystroke 0.793274 0.793274 0.798424 10.3088 2173.75 [baseline] Last Class ImageSegments 0.14789 0.14789 0.147887 0.00136757 2.67732 [baseline] Last Class Insects 0.289115 0.289115 0.289295 0.00138664 59.1503 [baseline] Last Class Keystroke 0.997549 0.997549 0.997549 0.00504208 24.227 k-Nearest Neighbors ImageSegments 0.819922 0.819922 0.815895 0.12676 38.8794 k-Nearest Neighbors Insects 0.686547 0.686547 0.683661 0.216656 1254.78 k-Nearest Neighbors Keystroke 0.984509 0.984509 0.984508 0.214242 515.415


    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4666666666666667, \"MicroF1\": 0.4666666666666667, \"MacroF1\": 0.4009102009102009, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.163216 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5604395604395604, \"MicroF1\": 0.5604395604395604, \"MacroF1\": 0.5279334700387331, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.349738 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5474452554744526, \"MicroF1\": 0.5474452554744526, \"MacroF1\": 0.5191892873237388, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 0.5584899999999999 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5573770491803278, \"MicroF1\": 0.5573770491803278, \"MacroF1\": 0.5225713529323662, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.789485 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5545851528384279, \"MicroF1\": 0.5545851528384279, \"MacroF1\": 0.5217226223148511, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 1.042858 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.56, \"MicroF1\": 0.56, \"MacroF1\": 0.5450388711329708, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 1.324703 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5825545171339563, \"MicroF1\": 0.5825545171339563, \"MacroF1\": 0.5566705826058684, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 1.637036 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5940054495912807, \"MicroF1\": 0.5940054495912807, \"MacroF1\": 0.5613773296963412, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 1.979491 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5980629539951574, \"MicroF1\": 0.5980629539951574, \"MacroF1\": 0.5624927052752284, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 2.352111 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.599128540305011, \"MicroF1\": 0.599128540305011, \"MacroF1\": 0.5669821167583783, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 2.754918 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6099009900990099, \"MicroF1\": 0.6099009900990099, \"MacroF1\": 0.5922286190986811, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 3.188186 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6116152450090744, \"MicroF1\": 0.6116152450090744, \"MacroF1\": 0.5983340184133136, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 3.651555 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": 
\"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6180904522613065, \"MicroF1\": 0.6180904522613065, \"MacroF1\": 0.611527101723203, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 4.145135 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6158631415241057, \"MicroF1\": 0.6158631415241057, \"MacroF1\": 0.6113311881078581, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 4.668896 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6182873730043541, \"MicroF1\": 0.6182873730043541, \"MacroF1\": 0.615018998714676, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 5.223075 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.617687074829932, \"MicroF1\": 0.617687074829932, \"MacroF1\": 0.6157912419016742, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 5.807397 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6274007682458387, \"MicroF1\": 0.6274007682458387, \"MacroF1\": 0.6216325704223051, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 6.422078 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6324062877871826, \"MicroF1\": 0.6324062877871826, \"MacroF1\": 0.6280704917469789, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 7.066915 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6426116838487973, \"MicroF1\": 0.6426116838487973, \"MacroF1\": 0.6349558095046656, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 7.742184 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6485310119695321, \"MicroF1\": 0.6485310119695321, \"MacroF1\": 0.6384515982514894, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 8.447577 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6507772020725389, \"MicroF1\": 0.6507772020725389, \"MacroF1\": 0.6399118827528387, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 9.183146 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6508407517309595, \"MicroF1\": 0.6508407517309595, \"MacroF1\": 0.6387857120889422, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 9.95137 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6537369914853358, \"MicroF1\": 0.6537369914853358, \"MacroF1\": 0.6398811322847952, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 10.747402 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.658204895738894, \"MicroF1\": 0.658204895738894, \"MacroF1\": 0.6463297068165035, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 11.559914 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6640557006092254, \"MicroF1\": 0.6640557006092254, \"MacroF1\": 
0.6508930463144657, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 12.388643000000002 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6702928870292887, \"MicroF1\": 0.6702928870292887, \"MacroF1\": 0.6599370641329335, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 13.233598000000002 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6736502820306205, \"MicroF1\": 0.6736502820306205, \"MacroF1\": 0.669511465798708, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 14.094776000000005 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6822066822066822, \"MicroF1\": 0.6822066822066822, \"MacroF1\": 0.6790074545382362, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 14.972203000000004 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6841710427606902, \"MicroF1\": 0.6841710427606902, \"MacroF1\": 0.6834974476087325, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 15.866030000000004 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6874546773023931, \"MicroF1\": 0.6874546773023931, \"MacroF1\": 0.6876766922721351, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 16.775981000000005 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6919298245614035, \"MicroF1\": 0.6919298245614035, \"MacroF1\": 0.6930786661709784, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 17.702176000000005 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.698844323589395, \"MicroF1\": 0.698844323589395, \"MacroF1\": 0.6985606658027722, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 18.644575000000003 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7027027027027027, \"MicroF1\": 0.7027027027027027, \"MacroF1\": 0.7017787722939461, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 19.603248000000004 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7056941778630839, \"MicroF1\": 0.7056941778630839, \"MacroF1\": 0.7062915374924865, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 20.578282000000005 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7078931013051585, \"MicroF1\": 0.7078931013051585, \"MacroF1\": 0.7081385387673029, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 21.573844000000005 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7093655589123867, \"MicroF1\": 0.7093655589123867, \"MacroF1\": 0.7109488618373111, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 22.586424000000004 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7101704879482658, \"MicroF1\": 0.7101704879482658, \"MacroF1\": 
0.7132092257742534, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 23.615335000000005 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7143674871207785, \"MicroF1\": 0.7143674871207784, \"MacroF1\": 0.7178399485500211, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 24.660526000000004 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7172336865588399, \"MicroF1\": 0.7172336865588399, \"MacroF1\": 0.7191260584555578, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 25.721983000000005 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7199564980967917, \"MicroF1\": 0.7199564980967917, \"MacroF1\": 0.7217017555070445, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 26.79968000000001 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7204244031830239, \"MicroF1\": 0.7204244031830238, \"MacroF1\": 0.7234495525792994, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 27.893629000000004 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7219057483169342, \"MicroF1\": 0.7219057483169342, \"MacroF1\": 0.723848351214801, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 29.003837000000004 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.723823975720789, \"MicroF1\": 0.723823975720789, \"MacroF1\": 0.725139923863974, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 30.130512000000003 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.726643598615917, \"MicroF1\": 0.726643598615917, \"MacroF1\": 0.7268553573885639, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 31.273399000000005 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7269212179797003, \"MicroF1\": 0.7269212179797003, \"MacroF1\": 0.7276782991451582, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 32.432577 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286052009456265, \"MicroF1\": 0.7286052009456266, \"MacroF1\": 0.7283656039279266, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 33.608017000000004 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7306802406293382, \"MicroF1\": 0.7306802406293383, \"MacroF1\": 0.7303992643507475, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 34.7997 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.733574988672406, \"MicroF1\": 0.733574988672406, \"MacroF1\": 0.7322842940126589, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 36.007612 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314691522414558, \"MicroF1\": 0.7314691522414558, \"MacroF1\": 0.7300322879925133, \"Memory in Mb\": 
0.3900041580200195, \"Time in s\": 37.231763 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7316224445411048, \"MicroF1\": 0.7316224445411048, \"MacroF1\": 0.7300416811383057, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 38.472431 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.623696682464455, \"MicroF1\": 0.623696682464455, \"MacroF1\": 0.5870724729616661, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 0.909568 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6148744670772146, \"MicroF1\": 0.6148744670772146, \"MacroF1\": 0.5800776869595597, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 2.67356 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6065677297126618, \"MicroF1\": 0.6065677297126618, \"MacroF1\": 0.5714781230184183, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 5.143102000000001 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043097324177126, \"MicroF1\": 0.6043097324177126, \"MacroF1\": 0.5697541737710122, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 7.993857 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6088274294373934, \"MicroF1\": 0.6088274294373934, \"MacroF1\": 0.5727560614138387, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 11.225513 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6023677979479084, \"MicroF1\": 0.6023677979479084, \"MacroF1\": 0.5679597008529512, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 14.839337 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995129211202814, \"MicroF1\": 0.5995129211202814, \"MacroF1\": 0.5652603100832261, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 18.839998 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6019888717888008, \"MicroF1\": 0.6019888717888008, \"MacroF1\": 0.5673514925692325, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 23.223853 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5993896664211301, \"MicroF1\": 0.5993896664211301, \"MacroF1\": 0.5644951651039589, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 27.990643 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5994885879344635, \"MicroF1\": 0.5994885879344635, \"MacroF1\": 0.5645655385998631, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 33.140509 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5972449418854929, \"MicroF1\": 0.5972449418854929, \"MacroF1\": 0.5631227877868952, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 38.672833 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 
0.6001894088864336, \"MicroF1\": 0.6001894088864336, \"MacroF1\": 0.5684733590606373, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 44.58831000000001 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120783856632913, \"MicroF1\": 0.6120783856632913, \"MacroF1\": 0.5935173038317552, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 50.89270200000001 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6024487587093282, \"MicroF1\": 0.6024487587093282, \"MacroF1\": 0.5841270876002981, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 57.581734 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5676494728202538, \"MicroF1\": 0.5676494728202538, \"MacroF1\": 0.5507155080701159, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 64.65553700000001 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5418762947617638, \"MicroF1\": 0.5418762947617638, \"MacroF1\": 0.5256197352354143, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 72.114698 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5232020500250683, \"MicroF1\": 0.5232020500250683, \"MacroF1\": 0.5066898143269706, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 79.958388 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5118640500868101, \"MicroF1\": 0.5118640500868101, \"MacroF1\": 0.4926543583964285, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 88.190503 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5103922643672432, \"MicroF1\": 0.5103922643672432, \"MacroF1\": 0.4900586962359796, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 96.808684 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5115772527108291, \"MicroF1\": 0.5115772527108291, \"MacroF1\": 0.4910837640903744, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 105.81178 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5140022547914318, \"MicroF1\": 0.5140022547914318, \"MacroF1\": 0.4932541888231956, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 115.205863 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5154319659076234, \"MicroF1\": 0.5154319659076234, \"MacroF1\": 0.4943013417599926, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 124.990845 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5184254951208466, \"MicroF1\": 0.5184254951208466, \"MacroF1\": 0.4965832238311332, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 135.166218 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5225111470623052, \"MicroF1\": 0.5225111470623052, \"MacroF1\": 0.499893079239698, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 145.739141 }, { 
\"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5257396113489148, \"MicroF1\": 0.5257396113489148, \"MacroF1\": 0.5022487669255871, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 156.702601 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5301402294663996, \"MicroF1\": 0.5301402294663996, \"MacroF1\": 0.5051550433324518, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 168.057909 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5277261407877661, \"MicroF1\": 0.5277261407877661, \"MacroF1\": 0.5036945145235057, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 179.80420999999998 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5204450908107011, \"MicroF1\": 0.5204450908107011, \"MacroF1\": 0.4989008712312767, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 191.944501 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5147111648107632, \"MicroF1\": 0.5147111648107632, \"MacroF1\": 0.495826840073632, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 204.478499 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5105590454244137, \"MicroF1\": 0.5105590454244137, \"MacroF1\": 0.4941101813344875, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 217.402092 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5075607148312204, \"MicroF1\": 0.5075607148312204, \"MacroF1\": 0.4931947798921405, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 230.716201 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5044538486579266, \"MicroF1\": 0.5044538486579266, \"MacroF1\": 0.4905626123916189, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 244.420884 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5020231296811777, \"MicroF1\": 0.5020231296811777, \"MacroF1\": 0.487879842488124, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 258.51509 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4998746622844887, \"MicroF1\": 0.4998746622844887, \"MacroF1\": 0.4853435061152475, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 273.003699 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4967937444194918, \"MicroF1\": 0.4967937444194918, \"MacroF1\": 0.4819418474093529, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 287.883522 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4955938445350519, \"MicroF1\": 0.4955938445350519, \"MacroF1\": 0.4801892436835747, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 303.152298 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4940237004427836, \"MicroF1\": 
0.4940237004427836, \"MacroF1\": 0.478380783820526, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 318.807697 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.493508111745209, \"MicroF1\": 0.493508111745209, \"MacroF1\": 0.4785213801670671, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 334.85223 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4936988563242114, \"MicroF1\": 0.4936988563242114, \"MacroF1\": 0.4794201499427274, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 351.286644 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4938800634484718, \"MicroF1\": 0.4938800634484718, \"MacroF1\": 0.4802377497532936, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 368.105611 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4943757939715902, \"MicroF1\": 0.4943757939715902, \"MacroF1\": 0.4812132921167227, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 385.310693 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.494036211133909, \"MicroF1\": 0.494036211133909, \"MacroF1\": 0.4812388919618418, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 402.906414 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4944832294580131, \"MicroF1\": 0.4944832294580131, \"MacroF1\": 0.4818441874360224, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 420.888505 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4945225232981082, \"MicroF1\": 0.4945225232981082, \"MacroF1\": 0.4820791268335544, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 439.259743 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4956333256171216, \"MicroF1\": 0.4956333256171216, \"MacroF1\": 0.4833168636021498, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 458.017368 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4970869788986104, \"MicroF1\": 0.4970869788986104, \"MacroF1\": 0.4846703771634363, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 477.16088800000006 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4987608551107171, \"MicroF1\": 0.4987608551107171, \"MacroF1\": 0.4862426724473749, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 496.692936 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5009568528419516, \"MicroF1\": 0.5009568528419516, \"MacroF1\": 0.4881725476999718, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 516.6094800000001 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5034497419940862, \"MicroF1\": 0.5034497419940862, \"MacroF1\": 0.4903712806540024, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 536.9146260000001 }, { \"step\": 52800, \"track\": 
\"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5068467205818292, \"MicroF1\": 0.5068467205818292, \"MacroF1\": 0.4930025316136313, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 557.6057650000001 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852579852579852, \"MicroF1\": 0.9852579852579852, \"MacroF1\": 0.6962686567164179, \"Memory in Mb\": 0.1935644149780273, \"Time in s\": 0.122414 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947239263803681, \"MicroF1\": 0.947239263803681, \"MacroF1\": 0.7418606503288051, \"Memory in Mb\": 0.2889022827148437, \"Time in s\": 0.375804 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.884709730171709, \"MicroF1\": 0.884709730171709, \"MacroF1\": 0.8705899666065842, \"Memory in Mb\": 0.3842401504516601, \"Time in s\": 0.764348 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8933169834457388, \"MicroF1\": 0.8933169834457388, \"MacroF1\": 0.8791291775937072, \"Memory in Mb\": 0.4795780181884765, \"Time in s\": 1.303316 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8921039725355566, \"MicroF1\": 0.8921039725355566, \"MacroF1\": 0.8831785360852743, \"Memory in Mb\": 0.575160026550293, \"Time in s\": 2.01214 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.851655087862689, \"MicroF1\": 0.851655087862689, \"MacroF1\": 0.858198428951664, \"Memory in Mb\": 0.6704978942871094, \"Time in s\": 2.906585 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8598949211908932, \"MicroF1\": 0.8598949211908932, \"MacroF1\": 0.8469962214365345, \"Memory in Mb\": 0.7658357620239258, \"Time in s\": 3.994802 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8513637756665645, \"MicroF1\": 0.8513637756665645, \"MacroF1\": 0.8281280134770848, \"Memory in Mb\": 0.8611736297607422, \"Time in s\": 5.296884 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8422773086352493, \"MicroF1\": 0.8422773086352493, \"MacroF1\": 0.8409307955747314, \"Memory in Mb\": 0.9565114974975586, \"Time in s\": 6.831079000000001 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8367246874233881, \"MicroF1\": 0.8367246874233881, \"MacroF1\": 0.8249418657104467, \"Memory in Mb\": 1.0523834228515625, \"Time in s\": 8.617788000000001 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8203699576554491, \"MicroF1\": 0.8203699576554491, \"MacroF1\": 0.8300896799820437, \"Memory in Mb\": 1.147721290588379, \"Time in s\": 10.679552 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8192032686414709, \"MicroF1\": 0.8192032686414709, \"MacroF1\": 
0.8269731591910484, \"Memory in Mb\": 1.243059158325195, \"Time in s\": 13.032163 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8172732415613804, \"MicroF1\": 0.8172732415613804, \"MacroF1\": 0.8027823390848743, \"Memory in Mb\": 1.3383970260620115, \"Time in s\": 15.695238 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7961828051129399, \"MicroF1\": 0.7961828051129399, \"MacroF1\": 0.8002006091139847, \"Memory in Mb\": 1.433734893798828, \"Time in s\": 18.689224 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.793920575257395, \"MicroF1\": 0.793920575257395, \"MacroF1\": 0.7746960355921346, \"Memory in Mb\": 1.5290727615356443, \"Time in s\": 22.034543 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7688064960931515, \"MicroF1\": 0.7688064960931515, \"MacroF1\": 0.7622487598340326, \"Memory in Mb\": 1.624410629272461, \"Time in s\": 25.755146 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7568853640951694, \"MicroF1\": 0.7568853640951694, \"MacroF1\": 0.757813781660983, \"Memory in Mb\": 1.7197484970092771, \"Time in s\": 29.876127 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7669889690862045, \"MicroF1\": 0.7669889690862046, \"MacroF1\": 0.7643943615019535, \"Memory in Mb\": 1.8150863647460935, \"Time in s\": 34.413227 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7676428847890595, \"MicroF1\": 0.7676428847890595, \"MacroF1\": 0.7655695901071293, \"Memory in Mb\": 1.9104242324829104, \"Time in s\": 39.374485 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7714180659394534, \"MicroF1\": 0.7714180659394533, \"MacroF1\": 0.7672011803374248, \"Memory in Mb\": 2.0057621002197266, \"Time in s\": 44.773425 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7702813120112058, \"MicroF1\": 0.7702813120112058, \"MacroF1\": 0.7699263138193526, \"Memory in Mb\": 2.1021223068237305, \"Time in s\": 50.625164000000005 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7680222841225627, \"MicroF1\": 0.7680222841225627, \"MacroF1\": 0.7682287234686137, \"Memory in Mb\": 2.197460174560547, \"Time in s\": 56.940867 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7659597143770649, \"MicroF1\": 0.7659597143770649, \"MacroF1\": 0.7643546547243015, \"Memory in Mb\": 2.2927980422973637, \"Time in s\": 63.725868000000006 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7586559084873864, \"MicroF1\": 0.7586559084873864, \"MacroF1\": 0.7552148692020618, \"Memory in Mb\": 2.38813591003418, \"Time in s\": 70.991963 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": 
\"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7505637807628199, \"MicroF1\": 0.7505637807628199, \"MacroF1\": 0.7430512224080149, \"Memory in Mb\": 2.483473777770996, \"Time in s\": 78.748505 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7290468558499105, \"MicroF1\": 0.7290468558499106, \"MacroF1\": 0.715756093271779, \"Memory in Mb\": 2.5788116455078125, \"Time in s\": 87.01168299999999 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7217430776214253, \"MicroF1\": 0.7217430776214253, \"MacroF1\": 0.7173640789896896, \"Memory in Mb\": 2.674149513244629, \"Time in s\": 95.787317 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7151361288628206, \"MicroF1\": 0.7151361288628206, \"MacroF1\": 0.7011862635194492, \"Memory in Mb\": 2.7694873809814453, \"Time in s\": 105.08400199999998 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.705603921900093, \"MicroF1\": 0.705603921900093, \"MacroF1\": 0.6976881379682605, \"Memory in Mb\": 2.8648252487182617, \"Time in s\": 114.917299 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7094533867146009, \"MicroF1\": 0.7094533867146009, \"MacroF1\": 0.705840538940343, \"Memory in Mb\": 2.960163116455078, \"Time in s\": 125.31674099999998 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7053846762077963, \"MicroF1\": 0.7053846762077963, \"MacroF1\": 0.6965736948063981, \"Memory in Mb\": 3.0555009841918945, \"Time in s\": 136.28361299999997 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6927613941018766, \"MicroF1\": 0.6927613941018766, \"MacroF1\": 0.6842255816736497, \"Memory in Mb\": 3.150838851928711, \"Time in s\": 147.832836 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6890737577063062, \"MicroF1\": 0.6890737577063062, \"MacroF1\": 0.6845669389392289, \"Memory in Mb\": 3.246176719665528, \"Time in s\": 159.980064 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6873332852714296, \"MicroF1\": 0.6873332852714296, \"MacroF1\": 0.6839054551822702, \"Memory in Mb\": 3.341514587402344, \"Time in s\": 172.74228 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.682960991666083, \"MicroF1\": 0.682960991666083, \"MacroF1\": 0.6781566371919946, \"Memory in Mb\": 3.43685245513916, \"Time in s\": 186.135321 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.686185061619119, \"MicroF1\": 0.686185061619119, \"MacroF1\": 0.6843713776162116, \"Memory in Mb\": 3.532190322875977, \"Time in s\": 200.177651 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6928784365684001, \"MicroF1\": 0.6928784365684001, \"MacroF1\": 
0.6911392400672977, \"Memory in Mb\": 3.627528190612793, \"Time in s\": 214.888654 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6913500612784622, \"MicroF1\": 0.6913500612784622, \"MacroF1\": 0.687359772989117, \"Memory in Mb\": 3.72286605834961, \"Time in s\": 230.279565 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6819810194205267, \"MicroF1\": 0.6819810194205267, \"MacroF1\": 0.6749159449359359, \"Memory in Mb\": 3.818203926086426, \"Time in s\": 246.365674 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6726515105092223, \"MicroF1\": 0.6726515105092223, \"MacroF1\": 0.6670192172011686, \"Memory in Mb\": 3.913541793823242, \"Time in s\": 263.163212 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6695163508100676, \"MicroF1\": 0.6695163508100676, \"MacroF1\": 0.6664051037977978, \"Memory in Mb\": 4.008879661560059, \"Time in s\": 280.687557 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6650131310183834, \"MicroF1\": 0.6650131310183834, \"MacroF1\": 0.6608988619616459, \"Memory in Mb\": 4.1063079833984375, \"Time in s\": 298.952273 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6568431853160804, \"MicroF1\": 0.6568431853160804, \"MacroF1\": 0.653138289771919, \"Memory in Mb\": 4.201645851135254, \"Time in s\": 317.97399 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6556180714166342, \"MicroF1\": 0.6556180714166342, \"MacroF1\": 0.6538448358590967, \"Memory in Mb\": 4.29698371887207, \"Time in s\": 337.769402 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6614194672912468, \"MicroF1\": 0.6614194672912468, \"MacroF1\": 0.6603186829199905, \"Memory in Mb\": 4.392321586608887, \"Time in s\": 358.361854 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6669686151222891, \"MicroF1\": 0.6669686151222891, \"MacroF1\": 0.666229361655457, \"Memory in Mb\": 4.487659454345703, \"Time in s\": 379.763277 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6579921773142112, \"MicroF1\": 0.6579921773142112, \"MacroF1\": 0.6554177118629491, \"Memory in Mb\": 4.58299732208252, \"Time in s\": 401.986094 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6622580809886126, \"MicroF1\": 0.6622580809886126, \"MacroF1\": 0.6609360990360077, \"Memory in Mb\": 4.678335189819336, \"Time in s\": 425.04707400000007 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6562453103896754, \"MicroF1\": 0.6562453103896754, \"MacroF1\": 0.6545704957554573, \"Memory in Mb\": 4.773673057556152, \"Time in s\": 448.962927 }, { \"step\": 20400, \"track\": \"Multiclass classification\", 
\"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6525319868621011, \"MicroF1\": 0.6525319868621011, \"MacroF1\": 0.6515767870317881, \"Memory in Mb\": 4.869010925292969, \"Time in s\": 473.747426 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3555555555555555, \"MicroF1\": 0.3555555555555555, \"MacroF1\": 0.2537942449707155, \"Memory in Mb\": 0.4191083908081054, \"Time in s\": 0.164966 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4945054945054945, \"MicroF1\": 0.4945054945054945, \"MacroF1\": 0.5043329927491419, \"Memory in Mb\": 0.4191045761108398, \"Time in s\": 0.355643 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5564033878668025, \"Memory in Mb\": 0.4191999435424804, \"Time in s\": 0.571134 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6010928961748634, \"MicroF1\": 0.6010928961748634, \"MacroF1\": 0.6227664965396451, \"Memory in Mb\": 0.4191999435424804, \"Time in s\": 0.8113900000000001 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6375545851528385, \"MicroF1\": 0.6375545851528385, \"MacroF1\": 0.6539827168809461, \"Memory in Mb\": 0.4192228317260742, \"Time in s\": 1.079154 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6509090909090909, \"MicroF1\": 0.6509090909090909, \"MacroF1\": 0.6671561759164943, \"Memory in Mb\": 0.4192724227905273, \"Time in s\": 1.371943 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.67601246105919, \"MicroF1\": 0.67601246105919, \"MacroF1\": 0.6756614325426025, \"Memory in Mb\": 0.4192724227905273, \"Time in s\": 1.689575 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7029972752043597, \"MicroF1\": 0.7029972752043597, \"MacroF1\": 0.6993447851636565, \"Memory in Mb\": 0.4192457199096679, \"Time in s\": 2.032058 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7142857142857143, \"MicroF1\": 0.7142857142857143, \"MacroF1\": 0.7108606838045498, \"Memory in Mb\": 0.4191656112670898, \"Time in s\": 2.4019660000000003 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 0.7145969498910676, \"MacroF1\": 0.7090365931960759, \"Memory in Mb\": 0.4192419052124023, \"Time in s\": 2.796914 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7207920792079208, \"MicroF1\": 0.7207920792079208, \"MacroF1\": 0.7126631585949763, \"Memory in Mb\": 0.4192419052124023, \"Time in s\": 3.216844 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7223230490018149, 
\"MicroF1\": 0.7223230490018149, \"MacroF1\": 0.7157730164623107, \"Memory in Mb\": 0.4191350936889648, \"Time in s\": 3.6616 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286432160804021, \"MicroF1\": 0.7286432160804021, \"MacroF1\": 0.7216745323124732, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 4.131175 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7278382581648523, \"MicroF1\": 0.7278382581648523, \"MacroF1\": 0.72291051830875, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 4.628008 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314949201741655, \"MicroF1\": 0.7314949201741654, \"MacroF1\": 0.7263583447448078, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 5.149870999999999 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7333333333333333, \"MicroF1\": 0.7333333333333333, \"MacroF1\": 0.729431071218305, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 5.696603 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387964148527529, \"MicroF1\": 0.7387964148527529, \"MacroF1\": 0.7349287389986899, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 6.268242 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7376058041112454, \"MicroF1\": 0.7376058041112454, \"MacroF1\": 0.7356226390109742, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 6.867156 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7445589919816724, \"MicroF1\": 0.7445589919816724, \"MacroF1\": 0.7409366047432264, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 7.49107 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7453754080522307, \"MicroF1\": 0.7453754080522307, \"MacroF1\": 0.7408438328939173, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 8.139827 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7471502590673575, \"MicroF1\": 0.7471502590673575, \"MacroF1\": 0.7416651838589269, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 8.813418 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7467853610286844, \"MicroF1\": 0.7467853610286844, \"MacroF1\": 0.7416356251822, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 9.514287 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7492904446546831, \"MicroF1\": 0.7492904446546831, \"MacroF1\": 0.7430778844390782, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 10.240045 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7515865820489573, \"MicroF1\": 0.7515865820489573, \"MacroF1\": 0.7451256886686588, \"Memory in Mb\": 0.4191808700561523, 
\"Time in s\": 10.990683 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7536988685813751, \"MicroF1\": 0.7536988685813751, \"MacroF1\": 0.7468312166689606, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 11.766057 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7564853556485356, \"MicroF1\": 0.7564853556485356, \"MacroF1\": 0.7503479321738039, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 12.566171 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7566478646253022, \"MicroF1\": 0.7566478646253022, \"MacroF1\": 0.7509717522131719, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 13.393734 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7614607614607615, \"MicroF1\": 0.7614607614607615, \"MacroF1\": 0.7547643483779538, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 14.246394 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7614403600900225, \"MicroF1\": 0.7614403600900225, \"MacroF1\": 0.7551060921605869, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 15.123846 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621464829586657, \"MicroF1\": 0.7621464829586658, \"MacroF1\": 0.7562209880685911, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 16.026049 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7642105263157895, \"MicroF1\": 0.7642105263157895, \"MacroF1\": 0.7575332274919562, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 16.955566 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7688647178789939, \"MicroF1\": 0.768864717878994, \"MacroF1\": 0.760438686053582, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 17.910383 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7705998681608438, \"MicroF1\": 0.7705998681608438, \"MacroF1\": 0.7612069012840875, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 18.890183 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709532949456174, \"MicroF1\": 0.7709532949456174, \"MacroF1\": 0.7622701654854867, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 19.895086 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7712865133623369, \"MicroF1\": 0.771286513362337, \"MacroF1\": 0.7617247271717752, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 20.927569 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709969788519637, \"MicroF1\": 0.7709969788519637, \"MacroF1\": 0.7615629120572474, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 21.985292 }, { \"step\": 1702, \"track\": \"Multiclass classification\", 
\"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.770135214579659, \"MicroF1\": 0.770135214579659, \"MacroF1\": 0.7627316365695141, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 23.068121 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7727532913566113, \"MicroF1\": 0.7727532913566113, \"MacroF1\": 0.7649467707214076, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 24.176005 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7741215839375348, \"MicroF1\": 0.7741215839375348, \"MacroF1\": 0.7649332326562147, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 25.309107999999995 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7754214246873301, \"MicroF1\": 0.7754214246873301, \"MacroF1\": 0.7664700790631906, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 26.470049 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7740053050397878, \"MicroF1\": 0.7740053050397878, \"MacroF1\": 0.7655121135276625, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 27.656614999999995 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7742102537545313, \"MicroF1\": 0.7742102537545313, \"MacroF1\": 0.7648034036287765, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 28.868293999999995 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7754172989377845, \"MicroF1\": 0.7754172989377845, \"MacroF1\": 0.7656013068970458, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 30.105038 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7770637666831438, \"MicroF1\": 0.7770637666831438, \"MacroF1\": 0.7660878232247856, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 31.36953 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762203963267279, \"MicroF1\": 0.7762203963267279, \"MacroF1\": 0.7654829214385931, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 32.658967 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7768321513002364, \"MicroF1\": 0.7768321513002364, \"MacroF1\": 0.7653071619305024, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 33.973288999999994 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7778806108283203, \"MicroF1\": 0.7778806108283203, \"MacroF1\": 0.7659351904174981, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 35.312507 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7797915722700498, \"MicroF1\": 0.7797915722700498, \"MacroF1\": 0.7668192864082087, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 36.679284 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7767421216156236, \"MicroF1\": 0.7767421216156236, \"MacroF1\": 0.7637794374955548, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 38.070969 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7759895606785558, \"MicroF1\": 0.7759895606785558, \"MacroF1\": 0.763026662835187, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 39.487872 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6218009478672986, \"MicroF1\": 0.6218009478672986, \"MacroF1\": 0.585266310719421, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 1.016292 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6153481762198011, \"MicroF1\": 0.6153481762198011, \"MacroF1\": 0.5806436317780949, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 2.820671 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071992421850332, \"MicroF1\": 0.6071992421850332, \"MacroF1\": 0.572248584718361, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 5.468586999999999 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043097324177126, \"MicroF1\": 0.6043097324177126, \"MacroF1\": 0.5697573109597247, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 8.970813999999999 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6088274294373934, \"MicroF1\": 0.6088274294373934, \"MacroF1\": 0.5727379077413696, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 13.304299 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6026835043409629, \"MicroF1\": 0.6026835043409629, \"MacroF1\": 0.568251333238805, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 18.451533 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.600189419564335, \"MicroF1\": 0.600189419564335, \"MacroF1\": 0.5659762112716077, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 24.373815 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.60258079791642, \"MicroF1\": 0.60258079791642, \"MacroF1\": 0.5679781484640409, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 31.061276 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5998105861306956, \"MicroF1\": 0.5998105861306956, \"MacroF1\": 0.5649597336877693, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 38.490335 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5998674116867128, \"MicroF1\": 0.5998674116867128, \"MacroF1\": 0.5650173260529011, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 46.63726 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5974171330176495, \"MicroF1\": 0.5974171330176495, \"MacroF1\": 0.5633067089377386, \"Memory in Mb\": 
0.6617898941040039, \"Time in s\": 55.514266 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6001894088864336, \"MicroF1\": 0.6001894088864336, \"MacroF1\": 0.5684760329567131, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 65.102691 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120783856632913, \"MicroF1\": 0.6120783856632913, \"MacroF1\": 0.5935956771555828, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 75.408233 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6024487587093282, \"MicroF1\": 0.6024487587093282, \"MacroF1\": 0.5842148300149193, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 86.426133 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5677757434181451, \"MicroF1\": 0.5677757434181451, \"MacroF1\": 0.5509250187877572, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 98.158455 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5419354838709678, \"MicroF1\": 0.5419354838709678, \"MacroF1\": 0.5257359157219257, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 110.605121 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5233691716338923, \"MicroF1\": 0.5233691716338923, \"MacroF1\": 0.506858183835206, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 123.763106 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5121271110643447, \"MicroF1\": 0.5121271110643447, \"MacroF1\": 0.4929289906509415, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 137.636213 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5120370831879579, \"MicroF1\": 0.5120370831879579, \"MacroF1\": 0.4920970323041603, \"Memory in Mb\": 1.317840576171875, \"Time in s\": 152.19804 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5173066906577016, \"MicroF1\": 0.5173066906577016, \"MacroF1\": 0.497344716983625, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 167.375493 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5229312288613304, \"MicroF1\": 0.5229312288613304, \"MacroF1\": 0.5026343687424488, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 183.138263 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5301536739701261, \"MicroF1\": 0.5301536739701261, \"MacroF1\": 0.5095132087733324, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 199.493958 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5351422571746202, \"MicroF1\": 0.5351422571746202, \"MacroF1\": 0.5135975374357353, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 216.435818 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.5403069881229531, \"MicroF1\": 0.5403069881229531, \"MacroF1\": 0.5180803411538233, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 233.973059 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5441493995984696, \"MicroF1\": 0.5441493995984696, \"MacroF1\": 0.5209012984387186, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 252.0993 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5475869604807867, \"MicroF1\": 0.5475869604807867, \"MacroF1\": 0.5230407124785976, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 270.826115 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5442460804601733, \"MicroF1\": 0.5442460804601733, \"MacroF1\": 0.5199893698637053, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 290.125735 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5439848479724017, \"MicroF1\": 0.5439848479724017, \"MacroF1\": 0.5225387960194382, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 310.131151 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5449825294713124, \"MicroF1\": 0.5449825294713124, \"MacroF1\": 0.5260472440529832, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 330.869455 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5469238296663405, \"MicroF1\": 0.5469238296663405, \"MacroF1\": 0.5300194392617626, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 352.339648 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5492286543455017, \"MicroF1\": 0.5492286543455017, \"MacroF1\": 0.5337692045397759, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 374.544388 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5448196265277737, \"MicroF1\": 0.5448196265277737, \"MacroF1\": 0.5298516474077152, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 397.480297 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.539357763939507, \"MicroF1\": 0.539357763939507, \"MacroF1\": 0.5246413689313029, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 421.148709 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5352756037099964, \"MicroF1\": 0.5352756037099964, \"MacroF1\": 0.5204658240271912, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 445.552724 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5307232338537298, \"MicroF1\": 0.5307232338537298, \"MacroF1\": 0.5158458403074863, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 470.685377 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5287912666052874, \"MicroF1\": 0.5287912666052874, \"MacroF1\": 0.5138605376143625, \"Memory in 
Mb\": 1.8598642349243164, \"Time in s\": 496.544653 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5245322617798367, \"MicroF1\": 0.5245322617798367, \"MacroF1\": 0.5100329616180462, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 523.1337460000001 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5244847608841927, \"MicroF1\": 0.5244847608841927, \"MacroF1\": 0.5114466799524962, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 550.3794180000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5269650098341548, \"MicroF1\": 0.5269650098341548, \"MacroF1\": 0.5145630920489553, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 578.1701970000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5290608205686688, \"MicroF1\": 0.5290608205686688, \"MacroF1\": 0.5171452370879218, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 606.4941780000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5316318281556762, \"MicroF1\": 0.5316318281556762, \"MacroF1\": 0.5200714653059242, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 635.3594070000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5332912448422809, \"MicroF1\": 0.5332912448422809, \"MacroF1\": 0.521951703681177, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 664.7773900000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5350937080185875, \"MicroF1\": 0.5350937080185875, \"MacroF1\": 0.5236272112757866, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 694.7425150000001 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5374168693368917, \"MicroF1\": 0.5374168693368917, \"MacroF1\": 0.5257977177437826, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 725.2648820000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5359540394368568, \"MicroF1\": 0.5359540394368568, \"MacroF1\": 0.5247049329892776, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 756.3925470000001 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5333196088522902, \"MicroF1\": 0.5333196088522902, \"MacroF1\": 0.5224640186909638, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 788.1537450000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5314017448771937, \"MicroF1\": 0.5314017448771937, \"MacroF1\": 0.5209076603734538, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 820.5431960000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5321877404462683, \"MicroF1\": 0.5321877404462683, \"MacroF1\": 0.5219332135179457, \"Memory in Mb\": 2.097897529602051, \"Time in s\": 853.5752100000002 }, { 
\"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5376959202210927, \"MicroF1\": 0.5376959202210927, \"MacroF1\": 0.5274519689249669, \"Memory in Mb\": 2.335637092590332, \"Time in s\": 887.2128290000002 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5370177465482301, \"MicroF1\": 0.5370177465482301, \"MacroF1\": 0.5270712327692165, \"Memory in Mb\": 2.5391950607299805, \"Time in s\": 921.3507020000002 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.2276544570922851, \"Time in s\": 0.136786 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9423312883435584, \"MicroF1\": 0.9423312883435584, \"MacroF1\": 0.7661667470992702, \"Memory in Mb\": 0.3232784271240234, \"Time in s\": 0.4359419999999999 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8830744071954211, \"MicroF1\": 0.883074407195421, \"MacroF1\": 0.8761191747044462, \"Memory in Mb\": 0.4189023971557617, \"Time in s\": 0.926938 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8902513795217658, \"MicroF1\": 0.8902513795217658, \"MacroF1\": 0.8767853151263398, \"Memory in Mb\": 0.5150146484375, \"Time in s\": 1.637883 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8891613536047082, \"MicroF1\": 0.8891613536047082, \"MacroF1\": 0.8807858055314012, \"Memory in Mb\": 0.6221132278442383, \"Time in s\": 2.570345 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.848385778504291, \"MicroF1\": 0.848385778504291, \"MacroF1\": 0.8522513926518692, \"Memory in Mb\": 0.7177371978759766, \"Time in s\": 3.758757 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8563922942206655, \"MicroF1\": 0.8563922942206655, \"MacroF1\": 0.8440193478447515, \"Memory in Mb\": 0.8133611679077148, \"Time in s\": 5.245901 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8482991112473184, \"MicroF1\": 0.8482991112473184, \"MacroF1\": 0.8269786301577753, \"Memory in Mb\": 0.9089851379394532, \"Time in s\": 7.065474 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8392808499046581, \"MicroF1\": 0.8392808499046581, \"MacroF1\": 0.8374924160046074, \"Memory in Mb\": 1.0046091079711914, \"Time in s\": 9.269679 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8323118411375338, \"MicroF1\": 0.8323118411375338, \"MacroF1\": 0.8182261307945194, \"Memory in Mb\": 1.1253337860107422, \"Time in s\": 11.89727 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.8159126365054602, \"MicroF1\": 0.8159126365054602, \"MacroF1\": 0.8260965842218733, \"Memory in Mb\": 1.2209577560424805, \"Time in s\": 14.983422 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8149131767109296, \"MicroF1\": 0.8149131767109296, \"MacroF1\": 0.8221314665977922, \"Memory in Mb\": 1.3165817260742188, \"Time in s\": 18.566921 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8125589289081652, \"MicroF1\": 0.8125589289081652, \"MacroF1\": 0.797613058026624, \"Memory in Mb\": 1.412205696105957, \"Time in s\": 22.693048 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7907546839432674, \"MicroF1\": 0.7907546839432674, \"MacroF1\": 0.7936708037520237, \"Memory in Mb\": 1.507829666137695, \"Time in s\": 27.396131 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7886909625755842, \"MicroF1\": 0.7886909625755842, \"MacroF1\": 0.7694478218498494, \"Memory in Mb\": 1.6034536361694336, \"Time in s\": 32.715078 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7635973647924008, \"MicroF1\": 0.7635973647924008, \"MacroF1\": 0.75687960152136, \"Memory in Mb\": 1.699077606201172, \"Time in s\": 38.687416 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.75155010814708, \"MicroF1\": 0.7515501081470799, \"MacroF1\": 0.7521509466338958, \"Memory in Mb\": 1.7947015762329102, \"Time in s\": 45.356366 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7611330518861501, \"MicroF1\": 0.7611330518861501, \"MacroF1\": 0.7576671162861804, \"Memory in Mb\": 1.8917903900146484, \"Time in s\": 52.757111 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7617081666881693, \"MicroF1\": 0.7617081666881692, \"MacroF1\": 0.7593340838982118, \"Memory in Mb\": 1.9874143600463867, \"Time in s\": 60.92847 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7655349920333374, \"MicroF1\": 0.7655349920333374, \"MacroF1\": 0.7610505848438686, \"Memory in Mb\": 2.083038330078125, \"Time in s\": 69.910689 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7644449632310026, \"MicroF1\": 0.7644449632310025, \"MacroF1\": 0.7639417799779614, \"Memory in Mb\": 2.226712226867676, \"Time in s\": 79.742469 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7624512534818941, \"MicroF1\": 0.7624512534818941, \"MacroF1\": 0.7625605608371231, \"Memory in Mb\": 2.322336196899414, \"Time in s\": 90.464241 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7605243525524885, \"MicroF1\": 0.7605243525524885, \"MacroF1\": 0.7588384348689571, \"Memory in Mb\": 2.4179601669311523, \"Time 
in s\": 102.115634 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.753344908589521, \"MicroF1\": 0.753344908589521, \"MacroF1\": 0.7499438215834663, \"Memory in Mb\": 2.51358413696289, \"Time in s\": 114.735409 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7450730463770958, \"MicroF1\": 0.7450730463770959, \"MacroF1\": 0.7369660419615973, \"Memory in Mb\": 2.609208106994629, \"Time in s\": 128.375943 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7240501555576506, \"MicroF1\": 0.7240501555576506, \"MacroF1\": 0.7111305646829175, \"Memory in Mb\": 2.704832077026367, \"Time in s\": 143.06648900000002 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7166591012256015, \"MicroF1\": 0.7166591012256015, \"MacroF1\": 0.7122511515574345, \"Memory in Mb\": 2.800456047058105, \"Time in s\": 158.84720500000003 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.710146196270682, \"MicroF1\": 0.710146196270682, \"MacroF1\": 0.6963016796632095, \"Memory in Mb\": 2.896080017089844, \"Time in s\": 175.75710000000004 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7005324993660722, \"MicroF1\": 0.7005324993660722, \"MacroF1\": 0.6925666211338902, \"Memory in Mb\": 2.991703987121582, \"Time in s\": 193.83910100000003 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7043876133671052, \"MicroF1\": 0.7043876133671052, \"MacroF1\": 0.7007845610449206, \"Memory in Mb\": 3.0873279571533203, \"Time in s\": 213.15240600000004 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7004032576895707, \"MicroF1\": 0.7004032576895707, \"MacroF1\": 0.6915775762792659, \"Memory in Mb\": 3.1829519271850586, \"Time in s\": 233.71252100000004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6877058598238223, \"MicroF1\": 0.6877058598238223, \"MacroF1\": 0.6789768292873962, \"Memory in Mb\": 3.278575897216797, \"Time in s\": 255.55614900000003 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6838743222164451, \"MicroF1\": 0.6838743222164451, \"MacroF1\": 0.6791243465680946, \"Memory in Mb\": 3.374199867248535, \"Time in s\": 278.72697300000004 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6822146925239708, \"MicroF1\": 0.6822146925239708, \"MacroF1\": 0.6786558938530484, \"Memory in Mb\": 3.469823837280273, \"Time in s\": 303.265051 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6777085230058127, \"MicroF1\": 0.6777085230058127, \"MacroF1\": 0.6725285130045525, \"Memory in Mb\": 3.565447807312012, \"Time in s\": 329.21132600000004 }, { \"step\": 14688, \"track\": 
\"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6807380676788997, \"MicroF1\": 0.6807380676788997, \"MacroF1\": 0.6786761142186741, \"Memory in Mb\": 3.66107177734375, \"Time in s\": 356.60277 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6873799271281882, \"MicroF1\": 0.6873799271281882, \"MacroF1\": 0.68548393064844, \"Memory in Mb\": 3.756695747375488, \"Time in s\": 385.483239 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6858027478552539, \"MicroF1\": 0.6858027478552539, \"MacroF1\": 0.6816808496509055, \"Memory in Mb\": 3.8523197174072266, \"Time in s\": 415.890234 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6765759537426937, \"MicroF1\": 0.6765759537426937, \"MacroF1\": 0.6694713281964944, \"Memory in Mb\": 3.947943687438965, \"Time in s\": 447.863862 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6673815797536614, \"MicroF1\": 0.6673815797536614, \"MacroF1\": 0.6617321933140904, \"Memory in Mb\": 4.043567657470703, \"Time in s\": 481.440864 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6643151790518323, \"MicroF1\": 0.6643151790518323, \"MacroF1\": 0.6611780293584051, \"Memory in Mb\": 4.139191627502441, \"Time in s\": 516.667277 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6598774438284214, \"MicroF1\": 0.6598774438284214, \"MacroF1\": 0.655734247886306, \"Memory in Mb\": 4.333066940307617, \"Time in s\": 553.571784 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6518269395200365, \"MicroF1\": 0.6518269395200365, \"MacroF1\": 0.6481085155228207, \"Memory in Mb\": 4.428690910339356, \"Time in s\": 592.193317 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6507158375577963, \"MicroF1\": 0.6507158375577963, \"MacroF1\": 0.648936899585426, \"Memory in Mb\": 4.524314880371094, \"Time in s\": 632.57823 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6566806470940683, \"MicroF1\": 0.6566806470940683, \"MacroF1\": 0.6555764711123697, \"Memory in Mb\": 4.619938850402832, \"Time in s\": 674.762883 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.662279533223211, \"MicroF1\": 0.662279533223211, \"MacroF1\": 0.6615432060687811, \"Memory in Mb\": 4.71556282043457, \"Time in s\": 718.781471 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6534028683181226, \"MicroF1\": 0.6534028683181226, \"MacroF1\": 0.6508089832432515, \"Memory in Mb\": 4.811186790466309, \"Time in s\": 764.6790530000001 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6577643874789358, 
\"MicroF1\": 0.6577643874789358, \"MacroF1\": 0.6564201177589184, \"Memory in Mb\": 4.906810760498047, \"Time in s\": 812.4977690000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6518433294982742, \"MicroF1\": 0.6518433294982742, \"MacroF1\": 0.6501496360982538, \"Memory in Mb\": 5.002434730529785, \"Time in s\": 862.2665020000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6482180499044071, \"MicroF1\": 0.6482180499044071, \"MacroF1\": 0.6472493759146579, \"Memory in Mb\": 5.098058700561523, \"Time in s\": 914.036687 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4, \"MicroF1\": 0.4000000000000001, \"MacroF1\": 0.2926704014939309, \"Memory in Mb\": 0.4254798889160156, \"Time in s\": 0.179349 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5274725274725275, \"MicroF1\": 0.5274725274725275, \"MacroF1\": 0.5399541634835753, \"Memory in Mb\": 0.425537109375, \"Time in s\": 0.395098 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5547445255474452, \"MicroF1\": 0.5547445255474452, \"MacroF1\": 0.5795767508697842, \"Memory in Mb\": 0.4256591796875, \"Time in s\": 0.646414 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6398140932417979, \"Memory in Mb\": 0.4257431030273437, \"Time in s\": 0.936097 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6419213973799127, \"MicroF1\": 0.6419213973799127, \"MacroF1\": 0.6592174177506214, \"Memory in Mb\": 0.4257431030273437, \"Time in s\": 1.2615820000000002 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6545454545454545, \"MicroF1\": 0.6545454545454545, \"MacroF1\": 0.6716869228432982, \"Memory in Mb\": 0.4257926940917969, \"Time in s\": 1.6228100000000003 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6791277258566978, \"MicroF1\": 0.6791277258566978, \"MacroF1\": 0.6806263486692059, \"Memory in Mb\": 0.4258537292480469, \"Time in s\": 2.022508 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7029972752043597, \"MicroF1\": 0.7029972752043597, \"MacroF1\": 0.7008299817149242, \"Memory in Mb\": 0.4258270263671875, \"Time in s\": 2.458102 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7142857142857143, \"MicroF1\": 0.7142857142857143, \"MacroF1\": 0.7121569327354127, \"Memory in Mb\": 0.4257469177246094, \"Time in s\": 2.92926 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 
0.7145969498910676, \"MacroF1\": 0.7103106155638, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 3.4385440000000003 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7227722772277227, \"MicroF1\": 0.7227722772277227, \"MacroF1\": 0.715881182832702, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 3.983535 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7241379310344828, \"MicroF1\": 0.7241379310344829, \"MacroF1\": 0.7187949260386588, \"Memory in Mb\": 0.4257164001464844, \"Time in s\": 4.564144000000001 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286432160804021, \"MicroF1\": 0.7286432160804021, \"MacroF1\": 0.7227601649788371, \"Memory in Mb\": 0.4257392883300781, \"Time in s\": 5.1830560000000006 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7278382581648523, \"MicroF1\": 0.7278382581648523, \"MacroF1\": 0.7240595992457829, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 5.837887 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314949201741655, \"MicroF1\": 0.7314949201741654, \"MacroF1\": 0.727547508877315, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 6.528431 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7333333333333333, \"MicroF1\": 0.7333333333333333, \"MacroF1\": 0.730585229165138, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 7.25733 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387964148527529, \"MicroF1\": 0.7387964148527529, \"MacroF1\": 0.7359626710287273, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 8.022590000000001 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7376058041112454, \"MicroF1\": 0.7376058041112454, \"MacroF1\": 0.7367699509780541, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 8.823569 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7434135166093929, \"MicroF1\": 0.7434135166093929, \"MacroF1\": 0.7406779161411566, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 9.663167 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431991294885746, \"MicroF1\": 0.7431991294885745, \"MacroF1\": 0.7396284921253597, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 10.538696000000002 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7430051813471502, \"MicroF1\": 0.7430051813471502, \"MacroF1\": 0.7386475429248082, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 11.449986 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.7428288822947576, \"MicroF1\": 0.7428288822947575, \"MacroF1\": 0.7387392151852316, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 12.399906 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7445600756859035, \"MicroF1\": 0.7445600756859035, \"MacroF1\": 0.7397141356071754, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 13.385988 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7470534904805077, \"MicroF1\": 0.7470534904805077, \"MacroF1\": 0.7419829508197956, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 14.408007 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7484769364664926, \"MicroF1\": 0.7484769364664926, \"MacroF1\": 0.7430153502407321, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 15.46854 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7514644351464436, \"MicroF1\": 0.7514644351464436, \"MacroF1\": 0.7466450927602833, \"Memory in Mb\": 0.4254570007324219, \"Time in s\": 16.565517 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7518130539887188, \"MicroF1\": 0.7518130539887188, \"MacroF1\": 0.7475811251410989, \"Memory in Mb\": 0.4255790710449219, \"Time in s\": 17.698596 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7567987567987567, \"MicroF1\": 0.7567987567987567, \"MacroF1\": 0.7515585748403605, \"Memory in Mb\": 0.4256401062011719, \"Time in s\": 18.868108 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7576894223555889, \"MicroF1\": 0.7576894223555888, \"MacroF1\": 0.7527145732365901, \"Memory in Mb\": 0.4256401062011719, \"Time in s\": 20.076794 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7592458303118201, \"MicroF1\": 0.7592458303118201, \"MacroF1\": 0.754880899709855, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 21.321463 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621052631578947, \"MicroF1\": 0.7621052631578947, \"MacroF1\": 0.7572480123106181, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 22.601949 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7661454792658056, \"MicroF1\": 0.7661454792658056, \"MacroF1\": 0.7596240117389202, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 23.921025 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7679630850362558, \"MicroF1\": 0.7679630850362558, \"MacroF1\": 0.7604664202984912, \"Memory in Mb\": 0.4257621765136719, \"Time in s\": 25.275945 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7683941138835573, \"MicroF1\": 0.7683941138835573, \"MacroF1\": 0.7616623934037686, \"Memory in Mb\": 0.4257621765136719, \"Time in s\": 26.66678 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7681789931634556, \"MicroF1\": 0.7681789931634556, \"MacroF1\": 0.7606779105029744, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 28.096857 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7685800604229607, \"MicroF1\": 0.7685800604229607, \"MacroF1\": 0.7611818346958917, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 29.563118 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7683715461493239, \"MicroF1\": 0.768371546149324, \"MacroF1\": 0.7630805397579306, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 31.065673 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7716084716657127, \"MicroF1\": 0.7716084716657126, \"MacroF1\": 0.7661058855209445, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 32.607308 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7730061349693251, \"MicroF1\": 0.7730061349693251, \"MacroF1\": 0.76613283717613, \"Memory in Mb\": 0.4257583618164062, \"Time in s\": 34.185135 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7743338771071234, \"MicroF1\": 0.7743338771071234, \"MacroF1\": 0.7676486165305356, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 35.798944000000006 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7729442970822281, \"MicroF1\": 0.7729442970822282, \"MacroF1\": 0.7669643117326908, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 37.451807 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7736923873640601, \"MicroF1\": 0.7736923873640601, \"MacroF1\": 0.7669808567090198, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 39.140782 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7744056651492159, \"MicroF1\": 0.7744056651492159, \"MacroF1\": 0.7669005381948409, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 40.865953000000005 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7755808205635195, \"MicroF1\": 0.7755808205635196, \"MacroF1\": 0.7665616644775576, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 42.627552 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752537457709038, \"MicroF1\": 0.7752537457709039, \"MacroF1\": 0.7663566554091733, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 44.428542 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": 
\"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.775886524822695, \"MicroF1\": 0.775886524822695, \"MacroF1\": 0.7661827507972012, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 46.266451 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7764923646459972, \"MicroF1\": 0.7764923646459972, \"MacroF1\": 0.7663510353808046, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 48.14124 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7784322609877662, \"MicroF1\": 0.7784322609877662, \"MacroF1\": 0.767276937076619, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 50.054953000000005 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.775410563692854, \"MicroF1\": 0.775410563692854, \"MacroF1\": 0.7642399015136985, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 52.004875000000006 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7746846454980426, \"MicroF1\": 0.7746846454980426, \"MacroF1\": 0.7634961218545901, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 53.99744400000001 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6161137440758294, \"MicroF1\": 0.6161137440758294, \"MacroF1\": 0.5813841513331479, \"Memory in Mb\": 0.6684322357177734, \"Time in s\": 1.380603 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120322122216959, \"MicroF1\": 0.6120322122216959, \"MacroF1\": 0.5792161554760864, \"Memory in Mb\": 0.6684932708740234, \"Time in s\": 3.972211 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6049889485317335, \"MicroF1\": 0.6049889485317335, \"MacroF1\": 0.5721633809277146, \"Memory in Mb\": 0.6685543060302734, \"Time in s\": 7.807885 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.603125739995264, \"MicroF1\": 0.603125739995264, \"MacroF1\": 0.5703574432462962, \"Memory in Mb\": 0.6685543060302734, \"Time in s\": 12.856232 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6061754120098504, \"MicroF1\": 0.6061754120098504, \"MacroF1\": 0.5722430970062696, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 19.046562 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995264404104184, \"MicroF1\": 0.5995264404104184, \"MacroF1\": 0.5671511237518186, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 26.345703 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5972128264104992, \"MicroF1\": 0.5972128264104992, \"MacroF1\": 0.5650210504998666, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 34.702495 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": 
\"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5989108559251806, \"MicroF1\": 0.5989108559251806, \"MacroF1\": 0.566418690076869, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 44.110114 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5962327685993897, \"MicroF1\": 0.5962327685993897, \"MacroF1\": 0.5633780031885508, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 54.569631 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5964579979164694, \"MicroF1\": 0.5964579979164694, \"MacroF1\": 0.5634236596216465, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 66.07704199999999 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.594317692638829, \"MicroF1\": 0.594317692638829, \"MacroF1\": 0.5620068495149612, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 78.631408 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5975061163286244, \"MicroF1\": 0.5975061163286244, \"MacroF1\": 0.567518061449456, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 92.232933 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6097472135207984, \"MicroF1\": 0.6097472135207984, \"MacroF1\": 0.5927729676671933, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 106.889718 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6001488195900697, \"MicroF1\": 0.6001488195900697, \"MacroF1\": 0.5832911478837771, \"Memory in Mb\": 0.6683712005615234, \"Time in s\": 122.594966 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5673969316244712, \"MicroF1\": 0.5673969316244712, \"MacroF1\": 0.5522471754341497, \"Memory in Mb\": 0.8954944610595703, \"Time in s\": 139.36423 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5712340929269014, \"MicroF1\": 0.5712340929269014, \"MacroF1\": 0.559038323684958, \"Memory in Mb\": 1.4438505172729492, \"Time in s\": 157.284858 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5741184335134533, \"MicroF1\": 0.5741184335134533, \"MacroF1\": 0.5632919959429029, \"Memory in Mb\": 1.874833106994629, \"Time in s\": 176.64490099999998 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5867312042931552, \"MicroF1\": 0.5867312042931552, \"MacroF1\": 0.5723846445183199, \"Memory in Mb\": 0.4898128509521484, \"Time in s\": 197.018382 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5939789662562927, \"MicroF1\": 0.5939789662562927, \"MacroF1\": 0.5773993022741072, \"Memory in Mb\": 0.6687717437744141, \"Time in s\": 218.485499 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive 
Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.595908897201572, \"MicroF1\": 0.595908897201572, \"MacroF1\": 0.5788762098776178, \"Memory in Mb\": 0.6688938140869141, \"Time in s\": 241.01085 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5977452085682075, \"MicroF1\": 0.5977452085682075, \"MacroF1\": 0.5801804614049403, \"Memory in Mb\": 1.2152299880981443, \"Time in s\": 264.603623 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5997158968619517, \"MicroF1\": 0.5997158968619517, \"MacroF1\": 0.5818597835760811, \"Memory in Mb\": 1.3294572830200195, \"Time in s\": 289.451742 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6025033968789888, \"MicroF1\": 0.6025033968789888, \"MacroF1\": 0.5841484049015139, \"Memory in Mb\": 1.3295183181762695, \"Time in s\": 315.702926 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6047823856686264, \"MicroF1\": 0.6047823856686264, \"MacroF1\": 0.5859943093850892, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 343.366363 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6074472517898405, \"MicroF1\": 0.6074472517898405, \"MacroF1\": 0.5878557237787366, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 372.430483 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6086323074121289, \"MicroF1\": 0.6086323074121289, \"MacroF1\": 0.5880340775890752, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 402.902886 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6087124267826453, \"MicroF1\": 0.6087124267826453, \"MacroF1\": 0.5895354690395743, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 434.780231 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6080765718537559, \"MicroF1\": 0.6080765718537559, \"MacroF1\": 0.5920130278134075, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 468.066846 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071253632890311, \"MicroF1\": 0.6071253632890311, \"MacroF1\": 0.5937369304389161, \"Memory in Mb\": 1.3293352127075195, \"Time in s\": 502.755803 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071845702200196, \"MicroF1\": 0.6071845702200196, \"MacroF1\": 0.5960066132315273, \"Memory in Mb\": 1.3295793533325195, \"Time in s\": 538.8589619999999 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6079425691156255, \"MicroF1\": 0.6079425691156255, \"MacroF1\": 0.59836863034629, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 576.3667869999999 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6027936432777958, \"MicroF1\": 0.6027936432777958, \"MacroF1\": 0.5936321389881086, \"Memory in Mb\": 0.6688251495361328, \"Time in s\": 615.5159199999999 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6018882543690992, \"MicroF1\": 0.6018882543690992, \"MacroF1\": 0.5927698243358274, \"Memory in Mb\": 0.6689472198486328, \"Time in s\": 655.7217059999999 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.601398211848592, \"MicroF1\": 0.601398211848592, \"MacroF1\": 0.592182344393812, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 696.988077 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5999080061689981, \"MicroF1\": 0.5999080061689981, \"MacroF1\": 0.5906275041314122, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 739.3140189999999 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5996054189135868, \"MicroF1\": 0.5996054189135868, \"MacroF1\": 0.5899615119365567, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 782.70332 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5989608661155332, \"MicroF1\": 0.5989608661155332, \"MacroF1\": 0.5889868403975307, \"Memory in Mb\": 0.6687030792236328, \"Time in s\": 827.150323 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5947865526951928, \"MicroF1\": 0.5947865526951928, \"MacroF1\": 0.5855600636799734, \"Memory in Mb\": 0.6687030792236328, \"Time in s\": 872.66104 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5926717334822621, \"MicroF1\": 0.5926717334822621, \"MacroF1\": 0.5840930914391779, \"Memory in Mb\": 0.6688861846923828, \"Time in s\": 919.237173 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5913018774118706, \"MicroF1\": 0.5913018774118706, \"MacroF1\": 0.5832685369240246, \"Memory in Mb\": 0.6689472198486328, \"Time in s\": 966.874788 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5898833583554683, \"MicroF1\": 0.5898833583554683, \"MacroF1\": 0.5823904732646675, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 1015.577362 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5883745575071588, \"MicroF1\": 0.5883745575071588, \"MacroF1\": 0.5813207633940128, \"Memory in Mb\": 1.112539291381836, \"Time in s\": 1065.314826 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5863853590856035, \"MicroF1\": 0.5863853590856035, \"MacroF1\": 0.5797569747943008, \"Memory in Mb\": 1.3286066055297852, \"Time in s\": 1116.269805 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.5850461657663086, \"MicroF1\": 0.5850461657663086, \"MacroF1\": 0.5780695197887614, \"Memory in Mb\": 1.3287897109985352, \"Time in s\": 1168.116792 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5867968602032871, \"MicroF1\": 0.5867968602032871, \"MacroF1\": 0.5799343284152632, \"Memory in Mb\": 1.328934669494629, \"Time in s\": 1221.156775 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5917035512094699, \"MicroF1\": 0.5917035512094699, \"MacroF1\": 0.5847625919047718, \"Memory in Mb\": 1.329483985900879, \"Time in s\": 1275.6038119999998 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5968447139892405, \"MicroF1\": 0.5968447139892405, \"MacroF1\": 0.5895877351185161, \"Memory in Mb\": 1.329422950744629, \"Time in s\": 1331.456239 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.601673012804072, \"MicroF1\": 0.601673012804072, \"MacroF1\": 0.5939045014873635, \"Memory in Mb\": 1.329606056213379, \"Time in s\": 1388.7152379999998 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6067487389598593, \"MicroF1\": 0.6067487389598593, \"MacroF1\": 0.5983547975185618, \"Memory in Mb\": 1.329606056213379, \"Time in s\": 1447.387746 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6119623477717381, \"MicroF1\": 0.6119623477717381, \"MacroF1\": 0.6029934068442723, \"Memory in Mb\": 0.147679328918457, \"Time in s\": 1507.071307 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.2342357635498047, \"Time in s\": 0.174096 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.943558282208589, \"MicroF1\": 0.943558282208589, \"MacroF1\": 0.7669956277713079, \"Memory in Mb\": 0.3298597335815429, \"Time in s\": 0.617741 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8863450531479967, \"MicroF1\": 0.8863450531479967, \"MacroF1\": 0.8786592421362931, \"Memory in Mb\": 0.4254837036132812, \"Time in s\": 1.425386 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.891477621091355, \"MicroF1\": 0.891477621091355, \"MacroF1\": 0.8818548670971932, \"Memory in Mb\": 0.5215349197387695, \"Time in s\": 2.789107 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.889651790093183, \"MicroF1\": 0.889651790093183, \"MacroF1\": 0.8812768038030504, \"Memory in Mb\": 0.6287555694580078, \"Time in s\": 4.864698000000001 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.8414384961176952, \"MicroF1\": 0.8414384961176952, \"MacroF1\": 0.8420581397672002, \"Memory in Mb\": 0.7242574691772461, \"Time in s\": 7.621751000000001 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8500875656742557, \"MicroF1\": 0.8500875656742557, \"MacroF1\": 0.834558203718852, \"Memory in Mb\": 0.8199424743652344, \"Time in s\": 10.917147 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8406374501992032, \"MicroF1\": 0.8406374501992032, \"MacroF1\": 0.8151418555553325, \"Memory in Mb\": 0.9155054092407228, \"Time in s\": 14.806837 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8321983110868973, \"MicroF1\": 0.8321983110868973, \"MacroF1\": 0.8307198315203921, \"Memory in Mb\": 1.011190414428711, \"Time in s\": 19.353183 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.826182887962736, \"MicroF1\": 0.826182887962736, \"MacroF1\": 0.812376785603362, \"Memory in Mb\": 1.1319761276245115, \"Time in s\": 24.603589 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.809226654780477, \"MicroF1\": 0.809226654780477, \"MacroF1\": 0.8196273526663149, \"Memory in Mb\": 1.2275390625, \"Time in s\": 30.588099 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081716036772216, \"MicroF1\": 0.8081716036772216, \"MacroF1\": 0.815232111826365, \"Memory in Mb\": 1.3230409622192385, \"Time in s\": 37.350443 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8057703186875353, \"MicroF1\": 0.8057703186875353, \"MacroF1\": 0.7903391475861199, \"Memory in Mb\": 1.4186649322509766, \"Time in s\": 44.935392 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7860269655051655, \"MicroF1\": 0.7860269655051656, \"MacroF1\": 0.7895763142947655, \"Memory in Mb\": 1.5144109725952148, \"Time in s\": 53.372574 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.784441902271613, \"MicroF1\": 0.784441902271613, \"MacroF1\": 0.7657785418705475, \"Memory in Mb\": 1.6098518371582031, \"Time in s\": 62.716148 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7585414432357898, \"MicroF1\": 0.7585414432357898, \"MacroF1\": 0.751418836389106, \"Memory in Mb\": 1.7056589126586914, \"Time in s\": 73.022548 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7473684210526316, \"MicroF1\": 0.7473684210526316, \"MacroF1\": 0.7484284412750403, \"Memory in Mb\": 1.8010997772216797, \"Time in s\": 84.351871 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.7565027917744791, \"MicroF1\": 0.7565027917744791, \"MacroF1\": 0.7526701844923946, \"Memory in Mb\": 1.898371696472168, \"Time in s\": 96.7592 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7577086827506129, \"MicroF1\": 0.7577086827506129, \"MacroF1\": 0.7557350658705178, \"Memory in Mb\": 1.9939956665039065, \"Time in s\": 110.303168 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7617355068023042, \"MicroF1\": 0.7617355068023042, \"MacroF1\": 0.7576049653668415, \"Memory in Mb\": 2.0895586013793945, \"Time in s\": 125.047569 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7604762460604646, \"MicroF1\": 0.7604762460604646, \"MacroF1\": 0.7596175662696861, \"Memory in Mb\": 2.2332935333251958, \"Time in s\": 141.03972299999998 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.756991643454039, \"MicroF1\": 0.7569916434540391, \"MacroF1\": 0.7575313939177277, \"Memory in Mb\": 2.328978538513184, \"Time in s\": 158.34463899999997 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7558350207822658, \"MicroF1\": 0.7558350207822658, \"MacroF1\": 0.7548436696787698, \"Memory in Mb\": 2.424480438232422, \"Time in s\": 177.02417899999998 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.748340312531917, \"MicroF1\": 0.7483403125319169, \"MacroF1\": 0.7443908596260193, \"Memory in Mb\": 2.52004337310791, \"Time in s\": 197.139779 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7393862143347387, \"MicroF1\": 0.7393862143347387, \"MacroF1\": 0.7315892779928432, \"Memory in Mb\": 2.6156673431396484, \"Time in s\": 218.763511 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7196191194494201, \"MicroF1\": 0.7196191194494201, \"MacroF1\": 0.7089541376321257, \"Memory in Mb\": 2.7114133834838867, \"Time in s\": 241.930258 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7123921924648207, \"MicroF1\": 0.7123921924648208, \"MacroF1\": 0.7092068316988943, \"Memory in Mb\": 2.806976318359375, \"Time in s\": 266.699543 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7062943184802591, \"MicroF1\": 0.7062943184802591, \"MacroF1\": 0.694671323095531, \"Memory in Mb\": 2.9026002883911133, \"Time in s\": 293.16281 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6967289324655566, \"MicroF1\": 0.6967289324655566, \"MacroF1\": 0.6902328307983061, \"Memory in Mb\": 2.9981632232666016, \"Time in s\": 321.350926 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.7007108423890841, \"MicroF1\": 0.7007108423890841, \"MacroF1\": 0.6983689907908355, \"Memory in Mb\": 3.09378719329834, \"Time in s\": 351.321335 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6969241717403337, \"MicroF1\": 0.6969241717403337, \"MacroF1\": 0.6892508246262707, \"Memory in Mb\": 3.189472198486328, \"Time in s\": 383.138799 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6836461126005362, \"MicroF1\": 0.6836461126005362, \"MacroF1\": 0.6755391962059191, \"Memory in Mb\": 3.2851572036743164, \"Time in s\": 416.860837 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6793433855752804, \"MicroF1\": 0.6793433855752804, \"MacroF1\": 0.6754035266161622, \"Memory in Mb\": 3.3807201385498047, \"Time in s\": 452.545925 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6769519140653161, \"MicroF1\": 0.6769519140653161, \"MacroF1\": 0.6742482232309566, \"Memory in Mb\": 3.476466178894043, \"Time in s\": 490.25119899999993 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6728762518383641, \"MicroF1\": 0.6728762518383641, \"MacroF1\": 0.6689356443053496, \"Memory in Mb\": 3.5720291137695312, \"Time in s\": 530.0355519999999 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6762442976782188, \"MicroF1\": 0.6762442976782188, \"MacroF1\": 0.6753292472514647, \"Memory in Mb\": 3.66759204864502, \"Time in s\": 571.9481539999999 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6830076184166942, \"MicroF1\": 0.6830076184166942, \"MacroF1\": 0.6822311287838643, \"Memory in Mb\": 3.763277053833008, \"Time in s\": 616.057788 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6818035218989873, \"MicroF1\": 0.6818035218989873, \"MacroF1\": 0.6788656596145115, \"Memory in Mb\": 3.858839988708496, \"Time in s\": 662.434182 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6816039218150964, \"MicroF1\": 0.6816039218150964, \"MacroF1\": 0.6801525397911032, \"Memory in Mb\": 0.2779512405395508, \"Time in s\": 710.461266 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6858263373981249, \"MicroF1\": 0.6858263373981249, \"MacroF1\": 0.6851912800185752, \"Memory in Mb\": 0.4695272445678711, \"Time in s\": 759.2533930000001 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6896634184253004, \"MicroF1\": 0.6896634184253004, \"MacroF1\": 0.6890226069872225, \"Memory in Mb\": 0.6609811782836914, \"Time in s\": 808.8937860000001 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Keystroke\", \"Accuracy\": 0.6925007295010213, \"MicroF1\": 0.6925007295010213, \"MacroF1\": 0.691863544221197, \"Memory in Mb\": 0.9803314208984376, \"Time in s\": 859.476205 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6990252522373597, \"MicroF1\": 0.6990252522373597, \"MacroF1\": 0.6986638608261282, \"Memory in Mb\": 0.2722988128662109, \"Time in s\": 910.506546 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7040833379756003, \"MicroF1\": 0.7040833379756003, \"MacroF1\": 0.7034973599095433, \"Memory in Mb\": 0.1428661346435547, \"Time in s\": 961.768777 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7102783376000872, \"MicroF1\": 0.7102783376000872, \"MacroF1\": 0.7096708693716106, \"Memory in Mb\": 0.2385511398315429, \"Time in s\": 1013.187005 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7155645548036447, \"MicroF1\": 0.7155645548036447, \"MacroF1\": 0.714820465744771, \"Memory in Mb\": 0.3341751098632812, \"Time in s\": 1064.826228 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7183833116036505, \"MicroF1\": 0.7183833116036505, \"MacroF1\": 0.7174783905571958, \"Memory in Mb\": 0.4296159744262695, \"Time in s\": 1116.742805 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7229229433692489, \"MicroF1\": 0.7229229433692489, \"MacroF1\": 0.7220221994049509, \"Memory in Mb\": 0.5253620147705078, \"Time in s\": 1168.993402 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7224751138012105, \"MicroF1\": 0.7224751138012104, \"MacroF1\": 0.7211832505275634, \"Memory in Mb\": 0.6323385238647461, \"Time in s\": 1221.636335 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7237119466640521, \"MicroF1\": 0.7237119466640521, \"MacroF1\": 0.7223930256436224, \"Memory in Mb\": 0.7279014587402344, \"Time in s\": 1274.7271140000005 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4222222222222222, \"MicroF1\": 0.4222222222222222, \"MacroF1\": 0.3590236094437775, \"Memory in Mb\": 0.9732446670532228, \"Time in s\": 0.50913 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5604395604395604, \"MicroF1\": 0.5604395604395604, \"MacroF1\": 0.5746538615446178, \"Memory in Mb\": 1.0627803802490234, \"Time in s\": 1.324173 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5766423357664233, \"MicroF1\": 0.5766423357664233, \"MacroF1\": 0.598257695340355, \"Memory in Mb\": 1.355058670043945, \"Time in s\": 2.399595 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.6229508196721312, \"MicroF1\": 0.6229508196721312, \"MacroF1\": 0.6451744040758779, \"Memory in Mb\": 1.424909591674805, \"Time in s\": 3.72413 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6506550218340611, \"MicroF1\": 0.6506550218340611, \"MacroF1\": 0.668065528002595, \"Memory in Mb\": 1.5721073150634766, \"Time in s\": 5.289042 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6727272727272727, \"MicroF1\": 0.6727272727272727, \"MacroF1\": 0.6900672130049011, \"Memory in Mb\": 1.7710065841674805, \"Time in s\": 7.016464 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7040498442367601, \"MicroF1\": 0.7040498442367601, \"MacroF1\": 0.7087861936875777, \"Memory in Mb\": 1.8489313125610352, \"Time in s\": 8.876771 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7302452316076294, \"MicroF1\": 0.7302452316076294, \"MacroF1\": 0.7285991575377422, \"Memory in Mb\": 1.987476348876953, \"Time in s\": 10.883345 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7457627118644068, \"MicroF1\": 0.7457627118644068, \"MacroF1\": 0.7430362907281778, \"Memory in Mb\": 2.008787155151367, \"Time in s\": 13.045158 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7342047930283224, \"MicroF1\": 0.7342047930283224, \"MacroF1\": 0.7271744800226859, \"Memory in Mb\": 1.8246965408325195, \"Time in s\": 15.362221000000002 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7405940594059406, \"MicroF1\": 0.7405940594059406, \"MacroF1\": 0.7304322149686578, \"Memory in Mb\": 1.7282800674438477, \"Time in s\": 17.823546 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7368421052631579, \"MicroF1\": 0.7368421052631579, \"MacroF1\": 0.7267508109083203, \"Memory in Mb\": 1.5214414596557615, \"Time in s\": 20.437237 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7403685092127303, \"MicroF1\": 0.7403685092127302, \"MacroF1\": 0.7318978254380312, \"Memory in Mb\": 1.6621322631835938, \"Time in s\": 23.204591 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7325038880248833, \"MicroF1\": 0.7325038880248833, \"MacroF1\": 0.7248107612258206, \"Memory in Mb\": 1.7895660400390625, \"Time in s\": 26.125323 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7242380261248186, \"MicroF1\": 0.7242380261248187, \"MacroF1\": 0.7153272190465999, \"Memory in Mb\": 1.929594039916992, \"Time in s\": 29.195315 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251700680272108, \"MicroF1\": 0.725170068027211, \"MacroF1\": 0.7148466398758337, \"Memory in Mb\": 2.079819679260254, \"Time in s\": 32.416287000000004 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7259923175416133, \"MicroF1\": 0.7259923175416134, \"MacroF1\": 0.7134712280209222, \"Memory in Mb\": 2.0407657623291016, \"Time in s\": 35.785816000000004 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.727932285368803, \"MicroF1\": 0.727932285368803, \"MacroF1\": 0.7177600265828429, \"Memory in Mb\": 2.245401382446289, \"Time in s\": 39.307391 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7353951890034365, \"MicroF1\": 0.7353951890034366, \"MacroF1\": 0.7262567978322628, \"Memory in Mb\": 2.3208675384521484, \"Time in s\": 42.982338000000006 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431991294885746, \"MicroF1\": 0.7431991294885745, \"MacroF1\": 0.7345004589126253, \"Memory in Mb\": 2.463038444519043, \"Time in s\": 46.81357700000001 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7471502590673575, \"MicroF1\": 0.7471502590673575, \"MacroF1\": 0.7368855656689401, \"Memory in Mb\": 2.4979677200317383, \"Time in s\": 50.81254500000001 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7546983184965381, \"MicroF1\": 0.754698318496538, \"MacroF1\": 0.7446216664767904, \"Memory in Mb\": 2.589772224426269, \"Time in s\": 54.96693400000001 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.760643330179754, \"MicroF1\": 0.760643330179754, \"MacroF1\": 0.7502594177262459, \"Memory in Mb\": 2.824686050415039, \"Time in s\": 59.28579400000001 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7624660018132366, \"MicroF1\": 0.7624660018132366, \"MacroF1\": 0.7523020427630668, \"Memory in Mb\": 2.512765884399414, \"Time in s\": 63.76907700000001 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650130548302873, \"MicroF1\": 0.7650130548302874, \"MacroF1\": 0.7555087521342715, \"Memory in Mb\": 2.350802421569824, \"Time in s\": 68.40298100000001 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7690376569037657, \"MicroF1\": 0.7690376569037657, \"MacroF1\": 0.7603504370239863, \"Memory in Mb\": 2.0774078369140625, \"Time in s\": 73.17908000000001 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7719580983078163, \"MicroF1\": 0.7719580983078163, \"MacroF1\": 0.7638249032322542, \"Memory in Mb\": 2.143113136291504, \"Time in s\": 78.09633200000002 }, { \"step\": 1288, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7746697746697747, \"MicroF1\": 0.7746697746697747, \"MacroF1\": 0.7668828628349821, \"Memory in Mb\": 2.3053293228149414, \"Time in s\": 83.14236000000002 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7771942985746436, \"MicroF1\": 0.7771942985746436, \"MacroF1\": 0.7696789046658701, \"Memory in Mb\": 2.4279375076293945, \"Time in s\": 88.31606900000003 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7817258883248731, \"MicroF1\": 0.7817258883248731, \"MacroF1\": 0.7754511149783998, \"Memory in Mb\": 2.350360870361328, \"Time in s\": 93.61768400000004 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7866666666666666, \"MicroF1\": 0.7866666666666666, \"MacroF1\": 0.7797171864703156, \"Memory in Mb\": 2.461531639099121, \"Time in s\": 99.04413800000005 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7912984364377974, \"MicroF1\": 0.7912984364377974, \"MacroF1\": 0.7836430453045393, \"Memory in Mb\": 2.5941333770751958, \"Time in s\": 104.59649900000004 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7963085036255768, \"MicroF1\": 0.7963085036255768, \"MacroF1\": 0.7883976288226552, \"Memory in Mb\": 2.7080554962158203, \"Time in s\": 110.30036700000004 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7984644913627639, \"MicroF1\": 0.7984644913627639, \"MacroF1\": 0.7915512335737709, \"Memory in Mb\": 2.379396438598633, \"Time in s\": 116.13112600000004 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.798011187072716, \"MicroF1\": 0.7980111870727161, \"MacroF1\": 0.7913527809122488, \"Memory in Mb\": 2.557906150817871, \"Time in s\": 122.09210800000004 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7987915407854985, \"MicroF1\": 0.7987915407854985, \"MacroF1\": 0.7921693301011166, \"Memory in Mb\": 2.5870275497436523, \"Time in s\": 128.19249900000003 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7995296884185773, \"MicroF1\": 0.7995296884185774, \"MacroF1\": 0.7947635312368726, \"Memory in Mb\": 2.441390991210937, \"Time in s\": 134.42240900000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8019461934745278, \"MicroF1\": 0.8019461934745278, \"MacroF1\": 0.7968342396743014, \"Memory in Mb\": 2.619420051574707, \"Time in s\": 140.777643 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8059118795315114, \"MicroF1\": 0.8059118795315114, \"MacroF1\": 
0.8002313091513137, \"Memory in Mb\": 2.70393180847168, \"Time in s\": 147.25850100000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8058727569331158, \"MicroF1\": 0.8058727569331158, \"MacroF1\": 0.8006185305294855, \"Memory in Mb\": 3.167543411254883, \"Time in s\": 153.88034500000003 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8084880636604774, \"MicroF1\": 0.8084880636604774, \"MacroF1\": 0.8041348438460234, \"Memory in Mb\": 3.187774658203125, \"Time in s\": 160.64099900000002 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8089073019161056, \"MicroF1\": 0.8089073019161055, \"MacroF1\": 0.8042053366874767, \"Memory in Mb\": 3.4328765869140625, \"Time in s\": 167.537774 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8108244815376834, \"MicroF1\": 0.8108244815376834, \"MacroF1\": 0.8062422218151643, \"Memory in Mb\": 3.621993064880371, \"Time in s\": 174.56717600000002 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8111715274345032, \"MicroF1\": 0.8111715274345032, \"MacroF1\": 0.805670935248126, \"Memory in Mb\": 3.783546447753906, \"Time in s\": 181.730034 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8134364427259546, \"MicroF1\": 0.8134364427259546, \"MacroF1\": 0.8085538776813638, \"Memory in Mb\": 3.740958213806152, \"Time in s\": 189.029553 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.816548463356974, \"MicroF1\": 0.816548463356974, \"MacroF1\": 0.8113031614777911, \"Memory in Mb\": 3.760796546936035, \"Time in s\": 196.461518 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8167515039333642, \"MicroF1\": 0.8167515039333642, \"MacroF1\": 0.8113905234748385, \"Memory in Mb\": 4.035200119018555, \"Time in s\": 204.024576 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.818305391934753, \"MicroF1\": 0.818305391934753, \"MacroF1\": 0.8126353495892602, \"Memory in Mb\": 4.192110061645508, \"Time in s\": 211.717055 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8184642698624057, \"MicroF1\": 0.8184642698624057, \"MacroF1\": 0.8136291554244021, \"Memory in Mb\": 4.486760139465332, \"Time in s\": 219.553296 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8190517616354936, \"MicroF1\": 0.8190517616354936, \"MacroF1\": 0.8144252010220491, \"Memory in Mb\": 4.66081428527832, \"Time in s\": 227.540685 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6701421800947868, \"MicroF1\": 
0.6701421800947868, \"MacroF1\": 0.6068786932307204, \"Memory in Mb\": 6.831533432006836, \"Time in s\": 4.539822 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6887730933207011, \"MicroF1\": 0.6887730933207011, \"MacroF1\": 0.6229217946585527, \"Memory in Mb\": 10.195775032043455, \"Time in s\": 12.638872 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6962425007893905, \"MicroF1\": 0.6962425007893905, \"MacroF1\": 0.622910390568452, \"Memory in Mb\": 16.60274887084961, \"Time in s\": 24.371093 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7054226852948141, \"MicroF1\": 0.7054226852948141, \"MacroF1\": 0.6279874627708885, \"Memory in Mb\": 17.471903800964355, \"Time in s\": 39.890989 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7099829513165372, \"MicroF1\": 0.7099829513165372, \"MacroF1\": 0.6301031937879839, \"Memory in Mb\": 20.888835906982425, \"Time in s\": 58.938014 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7108129439621153, \"MicroF1\": 0.7108129439621153, \"MacroF1\": 0.6300557461749893, \"Memory in Mb\": 23.72772216796875, \"Time in s\": 81.51329799999999 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7126234609660398, \"MicroF1\": 0.7126234609660397, \"MacroF1\": 0.6287819651813062, \"Memory in Mb\": 28.070199966430664, \"Time in s\": 107.680368 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7168225405469397, \"MicroF1\": 0.7168225405469397, \"MacroF1\": 0.6299159911335922, \"Memory in Mb\": 31.613859176635746, \"Time in s\": 137.61638299999998 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7214563821950963, \"MicroF1\": 0.7214563821950963, \"MacroF1\": 0.6314635817104112, \"Memory in Mb\": 35.43206214904785, \"Time in s\": 171.294053 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7230798371057865, \"MicroF1\": 0.7230798371057865, \"MacroF1\": 0.6311670445333952, \"Memory in Mb\": 35.676584243774414, \"Time in s\": 208.900765 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7247524752475247, \"MicroF1\": 0.7247524752475247, \"MacroF1\": 0.6314302971551563, \"Memory in Mb\": 41.6592378616333, \"Time in s\": 250.157475 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7252781943019493, \"MicroF1\": 0.7252781943019494, \"MacroF1\": 0.6359647238599803, \"Memory in Mb\": 42.66780757904053, \"Time in s\": 295.204683 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7402928535004006, \"MicroF1\": 0.7402928535004006, \"MacroF1\": 
0.7348419335996624, \"Memory in Mb\": 24.025733947753903, \"Time in s\": 342.696854 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7468714063451262, \"MicroF1\": 0.7468714063451262, \"MacroF1\": 0.7455387452701401, \"Memory in Mb\": 2.272738456726074, \"Time in s\": 392.697558 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7388092682618852, \"MicroF1\": 0.7388092682618853, \"MacroF1\": 0.7393651674564367, \"Memory in Mb\": 6.547223091125488, \"Time in s\": 445.901262 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7343000887836638, \"MicroF1\": 0.7343000887836638, \"MacroF1\": 0.7364396291092657, \"Memory in Mb\": 11.097336769104004, \"Time in s\": 502.213806 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7271461199933151, \"MicroF1\": 0.7271461199933151, \"MacroF1\": 0.7303078098029304, \"Memory in Mb\": 16.88492202758789, \"Time in s\": 561.702561 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373073078339559, \"MicroF1\": 0.7373073078339558, \"MacroF1\": 0.7369507693389319, \"Memory in Mb\": 6.800461769104004, \"Time in s\": 623.6949609999999 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7412650152021133, \"MicroF1\": 0.7412650152021133, \"MacroF1\": 0.7370000710650216, \"Memory in Mb\": 3.369675636291504, \"Time in s\": 688.4094249999999 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7425540982054074, \"MicroF1\": 0.7425540982054074, \"MacroF1\": 0.7353012584659039, \"Memory in Mb\": 5.941231727600098, \"Time in s\": 756.7149029999999 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7435851183765502, \"MicroF1\": 0.7435851183765501, \"MacroF1\": 0.7334480812988377, \"Memory in Mb\": 8.846389770507812, \"Time in s\": 828.508796 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7452111402866859, \"MicroF1\": 0.7452111402866859, \"MacroF1\": 0.7324964055744654, \"Memory in Mb\": 9.471121788024902, \"Time in s\": 903.877525 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7463663688392967, \"MicroF1\": 0.7463663688392966, \"MacroF1\": 0.7310050929424414, \"Memory in Mb\": 12.406947135925291, \"Time in s\": 982.696529 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7474647831748412, \"MicroF1\": 0.7474647831748412, \"MacroF1\": 0.7298615493429103, \"Memory in Mb\": 15.979948043823242, \"Time in s\": 1064.886272 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7483616803666806, \"MicroF1\": 0.7483616803666806, \"MacroF1\": 0.7285096183890708, 
\"Memory in Mb\": 19.665884017944336, \"Time in s\": 1150.466447 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.749626661810235, \"MicroF1\": 0.749626661810235, \"MacroF1\": 0.7275235594970662, \"Memory in Mb\": 24.26569175720215, \"Time in s\": 1239.482455 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7465188874469503, \"MicroF1\": 0.7465188874469504, \"MacroF1\": 0.7258897093263847, \"Memory in Mb\": 8.914395332336426, \"Time in s\": 1332.2242660000002 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7451550715324518, \"MicroF1\": 0.7451550715324519, \"MacroF1\": 0.7292330017207805, \"Memory in Mb\": 8.459691047668457, \"Time in s\": 1428.2455670000002 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7443751428664729, \"MicroF1\": 0.7443751428664729, \"MacroF1\": 0.7327893612754602, \"Memory in Mb\": 12.943696022033691, \"Time in s\": 1527.20672 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7437103443921841, \"MicroF1\": 0.7437103443921841, \"MacroF1\": 0.7357305076230832, \"Memory in Mb\": 18.80640697479248, \"Time in s\": 1629.363326 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7432717275087827, \"MicroF1\": 0.7432717275087827, \"MacroF1\": 0.7381285892142362, \"Memory in Mb\": 16.162379264831543, \"Time in s\": 1734.350472 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7377408185611554, \"MicroF1\": 0.7377408185611554, \"MacroF1\": 0.7340057348640155, \"Memory in Mb\": 11.18346881866455, \"Time in s\": 1842.731877 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7340373633311332, \"MicroF1\": 0.7340373633311332, \"MacroF1\": 0.7302084976112027, \"Memory in Mb\": 5.613262176513672, \"Time in s\": 1955.023471 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7312759379439044, \"MicroF1\": 0.7312759379439044, \"MacroF1\": 0.7271196230245338, \"Memory in Mb\": 9.756120681762695, \"Time in s\": 2071.1719540000004 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7278335452799047, \"MicroF1\": 0.7278335452799047, \"MacroF1\": 0.7234434079919367, \"Memory in Mb\": 11.990450859069824, \"Time in s\": 2191.1586210000005 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7254241746678942, \"MicroF1\": 0.7254241746678942, \"MacroF1\": 0.7207605796154644, \"Memory in Mb\": 14.404197692871094, \"Time in s\": 2314.6725880000004 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7250390315067441, \"MicroF1\": 0.7250390315067441, \"MacroF1\": 0.7205508934526729, 
\"Memory in Mb\": 7.651473045349121, \"Time in s\": 2441.666332 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7236524036185112, \"MicroF1\": 0.7236524036185111, \"MacroF1\": 0.7196200887167502, \"Memory in Mb\": 7.583705902099609, \"Time in s\": 2572.286548 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7235995435009591, \"MicroF1\": 0.7235995435009591, \"MacroF1\": 0.7199895911465058, \"Memory in Mb\": 12.209360122680664, \"Time in s\": 2706.295895 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7235966760576719, \"MicroF1\": 0.7235966760576719, \"MacroF1\": 0.7203672841246517, \"Memory in Mb\": 15.002169609069824, \"Time in s\": 2843.875227 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7241713823767179, \"MicroF1\": 0.7241713823767179, \"MacroF1\": 0.7213145862540888, \"Memory in Mb\": 17.433518409729004, \"Time in s\": 2985.305163 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7245608892696895, \"MicroF1\": 0.7245608892696895, \"MacroF1\": 0.7219384327675483, \"Memory in Mb\": 20.337363243103027, \"Time in s\": 3130.446815 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7253947629220164, \"MicroF1\": 0.7253947629220163, \"MacroF1\": 0.7227741676779873, \"Memory in Mb\": 20.507991790771484, \"Time in s\": 3279.062075 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7263198674213891, \"MicroF1\": 0.7263198674213891, \"MacroF1\": 0.7236028172229397, \"Memory in Mb\": 24.947001457214355, \"Time in s\": 3431.085655 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7259622466802753, \"MicroF1\": 0.7259622466802753, \"MacroF1\": 0.7234132526915972, \"Memory in Mb\": 9.389252662658691, \"Time in s\": 3586.979459 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7297581060216161, \"MicroF1\": 0.7297581060216161, \"MacroF1\": 0.7273884829439242, \"Memory in Mb\": 9.12541389465332, \"Time in s\": 3745.95156 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7336543692450284, \"MicroF1\": 0.7336543692450284, \"MacroF1\": 0.7312645046388119, \"Memory in Mb\": 8.804935455322266, \"Time in s\": 3907.504223 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7372501824925524, \"MicroF1\": 0.7372501824925524, \"MacroF1\": 0.7346466630802606, \"Memory in Mb\": 12.506796836853027, \"Time in s\": 4071.296369 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.741182382157973, \"MicroF1\": 0.741182382157973, \"MacroF1\": 0.7382896911640772, \"Memory in Mb\": 
15.31224250793457, \"Time in s\": 4237.296913 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7442565200098487, \"MicroF1\": 0.7442565200098487, \"MacroF1\": 0.7419321396565435, \"Memory in Mb\": 0.3696470260620117, \"Time in s\": 4404.707415 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.3514842987060547, \"Time in s\": 0.595241 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9730061349693252, \"MicroF1\": 0.9730061349693252, \"MacroF1\": 0.7867307803099512, \"Memory in Mb\": 1.312638282775879, \"Time in s\": 2.098583 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9705641864268192, \"MicroF1\": 0.9705641864268192, \"MacroF1\": 0.93705029195588, \"Memory in Mb\": 2.2586374282836914, \"Time in s\": 4.31371 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9711833231146536, \"MicroF1\": 0.9711833231146536, \"MacroF1\": 0.9377953913100076, \"Memory in Mb\": 3.394951820373535, \"Time in s\": 7.338651 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.969592937714566, \"MicroF1\": 0.969592937714566, \"MacroF1\": 0.9445939973353388, \"Memory in Mb\": 5.254854202270508, \"Time in s\": 11.230817 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.970167552104618, \"MicroF1\": 0.970167552104618, \"MacroF1\": 0.9654865811906564, \"Memory in Mb\": 2.048126220703125, \"Time in s\": 15.860121 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726795096322242, \"MicroF1\": 0.9726795096322242, \"MacroF1\": 0.9705770446236132, \"Memory in Mb\": 2.732625961303711, \"Time in s\": 21.099127 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.971805087342936, \"MicroF1\": 0.971805087342936, \"MacroF1\": 0.9627836140542232, \"Memory in Mb\": 2.790935516357422, \"Time in s\": 26.971176 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733042767638246, \"MicroF1\": 0.9733042767638246, \"MacroF1\": 0.9719148371902758, \"Memory in Mb\": 2.987569808959961, \"Time in s\": 33.477629 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9698455503799952, \"MicroF1\": 0.9698455503799952, \"MacroF1\": 0.958802050565698, \"Memory in Mb\": 4.571287155151367, \"Time in s\": 40.764845 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9710274125250724, \"MicroF1\": 0.9710274125250724, \"MacroF1\": 0.970190142555116, \"Memory in Mb\": 1.7459039688110352, \"Time in s\": 48.697823 }, { \"step\": 
4896, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722165474974463, \"MicroF1\": 0.9722165474974463, \"MacroF1\": 0.971936417428158, \"Memory in Mb\": 2.7892093658447266, \"Time in s\": 57.250967 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9720912690929664, \"MicroF1\": 0.9720912690929664, \"MacroF1\": 0.970282662152698, \"Memory in Mb\": 2.895453453063965, \"Time in s\": 66.54505900000001 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9723340921029592, \"MicroF1\": 0.9723340921029592, \"MacroF1\": 0.9718828908328702, \"Memory in Mb\": 4.064221382141113, \"Time in s\": 76.51574600000001 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718908318352673, \"MicroF1\": 0.9718908318352673, \"MacroF1\": 0.9703726237787478, \"Memory in Mb\": 5.130434989929199, \"Time in s\": 87.20768000000001 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97196261682243, \"MicroF1\": 0.97196261682243, \"MacroF1\": 0.9714458378209956, \"Memory in Mb\": 2.455193519592285, \"Time in s\": 98.596053 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733237202595528, \"MicroF1\": 0.9733237202595528, \"MacroF1\": 0.9740372626056704, \"Memory in Mb\": 2.4587574005126958, \"Time in s\": 110.618692 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9735802805392892, \"MicroF1\": 0.9735802805392892, \"MacroF1\": 0.973376514333954, \"Memory in Mb\": 3.893580436706543, \"Time in s\": 123.504669 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9729067217133271, \"MicroF1\": 0.9729067217133271, \"MacroF1\": 0.972110994169212, \"Memory in Mb\": 4.61766529083252, \"Time in s\": 137.14807100000002 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97217796298566, \"MicroF1\": 0.97217796298566, \"MacroF1\": 0.9713389113158796, \"Memory in Mb\": 5.2350358963012695, \"Time in s\": 151.61678500000002 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725691607330454, \"MicroF1\": 0.9725691607330454, \"MacroF1\": 0.9726516232305996, \"Memory in Mb\": 3.659168243408203, \"Time in s\": 166.92608600000003 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733704735376044, \"MicroF1\": 0.9733704735376044, \"MacroF1\": 0.973745927183376, \"Memory in Mb\": 5.072476387023926, \"Time in s\": 182.940013 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733560694873707, \"MicroF1\": 0.9733560694873707, \"MacroF1\": 0.9732604538569352, \"Memory in Mb\": 5.722126007080078, \"Time in s\": 199.767059 }, { \"step\": 9792, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9731385966704116, \"MicroF1\": 0.9731385966704116, \"MacroF1\": 0.9729642609350584, \"Memory in Mb\": 4.404660224914551, \"Time in s\": 217.43706000000003 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725463280713796, \"MicroF1\": 0.9725463280713796, \"MacroF1\": 0.9722080895483168, \"Memory in Mb\": 2.652709007263184, \"Time in s\": 235.89310200000003 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726595644385784, \"MicroF1\": 0.9726595644385784, \"MacroF1\": 0.972708084817296, \"Memory in Mb\": 1.1834087371826172, \"Time in s\": 254.958035 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9729459827507944, \"MicroF1\": 0.9729459827507944, \"MacroF1\": 0.973083018444042, \"Memory in Mb\": 1.520833969116211, \"Time in s\": 274.61158 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9724240567276548, \"MicroF1\": 0.9724240567276548, \"MacroF1\": 0.972236101273467, \"Memory in Mb\": 2.823396682739258, \"Time in s\": 294.969552 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9720226523539852, \"MicroF1\": 0.9720226523539852, \"MacroF1\": 0.9719096687987197, \"Memory in Mb\": 2.493410110473633, \"Time in s\": 316.08929700000004 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726284827191763, \"MicroF1\": 0.9726284827191763, \"MacroF1\": 0.9728780734732722, \"Memory in Mb\": 2.3478269577026367, \"Time in s\": 337.99406600000003 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9727998734877836, \"MicroF1\": 0.9727998734877836, \"MacroF1\": 0.9729097588140672, \"Memory in Mb\": 2.5516576766967773, \"Time in s\": 360.586004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726541554959786, \"MicroF1\": 0.9726541554959786, \"MacroF1\": 0.9726709194030316, \"Memory in Mb\": 3.304943084716797, \"Time in s\": 383.87924 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722201589541708, \"MicroF1\": 0.9722201589541708, \"MacroF1\": 0.9721650267620996, \"Memory in Mb\": 4.003572463989258, \"Time in s\": 407.932153 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97246052916156, \"MicroF1\": 0.97246052916156, \"MacroF1\": 0.972591005704606, \"Memory in Mb\": 4.270735740661621, \"Time in s\": 432.78626 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9713565375726592, \"MicroF1\": 0.9713565375726592, \"MacroF1\": 0.9711654862365112, \"Memory in Mb\": 4.102839469909668, \"Time in s\": 458.452707 }, { \"step\": 14688, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718118063593654, \"MicroF1\": 0.9718118063593654, \"MacroF1\": 0.9719655808676524, \"Memory in Mb\": 3.971695899963379, \"Time in s\": 484.888355 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9724412056972508, \"MicroF1\": 0.9724412056972508, \"MacroF1\": 0.9726138064055022, \"Memory in Mb\": 4.612870216369629, \"Time in s\": 512.116119 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.972327936528414, \"MicroF1\": 0.972327936528414, \"MacroF1\": 0.9723669009986284, \"Memory in Mb\": 3.2941598892211914, \"Time in s\": 540.133825 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97240902520269, \"MicroF1\": 0.97240902520269, \"MacroF1\": 0.9724748506273226, \"Memory in Mb\": 5.215278625488281, \"Time in s\": 569.064511 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718119982842086, \"MicroF1\": 0.9718119982842086, \"MacroF1\": 0.9717822259045504, \"Memory in Mb\": 2.705050468444824, \"Time in s\": 598.831932 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9713636635379924, \"MicroF1\": 0.9713636635379924, \"MacroF1\": 0.971358198091739, \"Memory in Mb\": 1.4999914169311523, \"Time in s\": 629.214947 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9716953603735046, \"MicroF1\": 0.9716953603735046, \"MacroF1\": 0.9717778191727772, \"Memory in Mb\": 1.5952835083007812, \"Time in s\": 660.25813 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9716696118109788, \"MicroF1\": 0.9716696118109788, \"MacroF1\": 0.971712982907841, \"Memory in Mb\": 2.6761178970336914, \"Time in s\": 692.002463 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9709765472675616, \"MicroF1\": 0.9709765472675616, \"MacroF1\": 0.970966525204854, \"Memory in Mb\": 3.671113014221192, \"Time in s\": 724.550435 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9709679176425732, \"MicroF1\": 0.9709679176425732, \"MacroF1\": 0.9710033330464194, \"Memory in Mb\": 4.91295337677002, \"Time in s\": 757.867405 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.971012948260244, \"MicroF1\": 0.971012948260244, \"MacroF1\": 0.9710485326344032, \"Memory in Mb\": 5.05178165435791, \"Time in s\": 792.058633 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97116036505867, \"MicroF1\": 0.97116036505867, \"MacroF1\": 0.9711938240802872, \"Memory in Mb\": 5.466279983520508, \"Time in s\": 827.148813 }, { \"step\": 19584, \"track\": \"Multiclass 
classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9707909921871012, \"MicroF1\": 0.9707909921871012, \"MacroF1\": 0.9708057459916865, \"Memory in Mb\": 5.881702423095703, \"Time in s\": 863.142165 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9705867640438196, \"MicroF1\": 0.9705867640438196, \"MacroF1\": 0.9706070593086332, \"Memory in Mb\": 5.831451416015625, \"Time in s\": 900.075184 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9698514633070248, \"MicroF1\": 0.9698514633070248, \"MacroF1\": 0.9698673821244655, \"Memory in Mb\": 2.3371658325195312, \"Time in s\": 937.846308 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2220238095238095, \"Memory in Mb\": 2.606511116027832, \"Time in s\": 1.431937 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4945054945054945, \"MicroF1\": 0.4945054945054945, \"MacroF1\": 0.5053729602697932, \"Memory in Mb\": 2.609585762023926, \"Time in s\": 3.886705 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5109489051094891, \"MicroF1\": 0.5109489051094891, \"MacroF1\": 0.5310665055578762, \"Memory in Mb\": 2.6113672256469727, \"Time in s\": 7.103774 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5737704918032787, \"MicroF1\": 0.5737704918032787, \"MacroF1\": 0.5886643910747036, \"Memory in Mb\": 2.6136903762817383, \"Time in s\": 11.095617 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6026200873362445, \"MicroF1\": 0.6026200873362445, \"MacroF1\": 0.6106719627755607, \"Memory in Mb\": 2.614529609680176, \"Time in s\": 15.833752 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6181818181818182, \"MicroF1\": 0.6181818181818182, \"MacroF1\": 0.6264208209498925, \"Memory in Mb\": 2.6147661209106445, \"Time in s\": 21.302563 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6448598130841121, \"MicroF1\": 0.6448598130841121, \"MacroF1\": 0.6378728366046057, \"Memory in Mb\": 2.616147041320801, \"Time in s\": 27.471138 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.667574931880109, \"MicroF1\": 0.667574931880109, \"MacroF1\": 0.6581306320431076, \"Memory in Mb\": 2.6166696548461914, \"Time in s\": 34.32642 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6803874092009685, \"MicroF1\": 0.6803874092009685, \"MacroF1\": 0.6704325632692101, \"Memory in Mb\": 2.6175050735473637, \"Time in s\": 41.86551 }, { \"step\": 460, \"track\": \"Multiclass 
classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6884531590413944, \"MicroF1\": 0.6884531590413944, \"MacroF1\": 0.6760149332924277, \"Memory in Mb\": 2.617680549621582, \"Time in s\": 50.055222 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.691089108910891, \"MicroF1\": 0.691089108910891, \"MacroF1\": 0.6769247074861785, \"Memory in Mb\": 2.617680549621582, \"Time in s\": 58.91147 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.691470054446461, \"MicroF1\": 0.691470054446461, \"MacroF1\": 0.6803521213965826, \"Memory in Mb\": 2.6178178787231445, \"Time in s\": 68.422832 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6968174204355109, \"MicroF1\": 0.6968174204355109, \"MacroF1\": 0.6854975219125513, \"Memory in Mb\": 2.617863655090332, \"Time in s\": 78.595742 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6936236391912908, \"MicroF1\": 0.6936236391912908, \"MacroF1\": 0.6835764097697864, \"Memory in Mb\": 2.6192026138305664, \"Time in s\": 89.423453 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6966618287373004, \"MicroF1\": 0.6966618287373004, \"MacroF1\": 0.6871604229696352, \"Memory in Mb\": 2.6194162368774414, \"Time in s\": 100.907013 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6965986394557823, \"MicroF1\": 0.6965986394557823, \"MacroF1\": 0.6884795420777536, \"Memory in Mb\": 2.6194887161254883, \"Time in s\": 113.056901 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7016645326504481, \"MicroF1\": 0.7016645326504481, \"MacroF1\": 0.6927955715819348, \"Memory in Mb\": 2.6197519302368164, \"Time in s\": 125.871728 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7037484885126964, \"MicroF1\": 0.7037484885126964, \"MacroF1\": 0.6971811816445675, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 139.34805 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7124856815578465, \"MicroF1\": 0.7124856815578465, \"MacroF1\": 0.7027179013602759, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 153.488556 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7127312295973884, \"MicroF1\": 0.7127312295973884, \"MacroF1\": 0.7019247882761857, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 168.286881 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7119170984455958, \"MicroF1\": 0.7119170984455958, \"MacroF1\": 0.7013991197312313, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 183.731934 }, { \"step\": 1012, \"track\": 
\"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7111770524233432, \"MicroF1\": 0.7111770524233432, \"MacroF1\": 0.7000689942734505, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 199.829198 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7123935666982024, \"MicroF1\": 0.7123935666982024, \"MacroF1\": 0.700757485135609, \"Memory in Mb\": 2.620041847229004, \"Time in s\": 216.594079 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7116953762466002, \"MicroF1\": 0.7116953762466002, \"MacroF1\": 0.6997536275311635, \"Memory in Mb\": 2.6201601028442383, \"Time in s\": 234.008444 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7136640557006092, \"MicroF1\": 0.7136640557006092, \"MacroF1\": 0.7002507718266925, \"Memory in Mb\": 2.6201601028442383, \"Time in s\": 252.079326 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7154811715481172, \"MicroF1\": 0.7154811715481171, \"MacroF1\": 0.7029614354817431, \"Memory in Mb\": 2.530026435852051, \"Time in s\": 270.790044 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.717163577759871, \"MicroF1\": 0.717163577759871, \"MacroF1\": 0.7059650228666394, \"Memory in Mb\": 2.753697395324707, \"Time in s\": 290.035925 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7187257187257188, \"MicroF1\": 0.7187257187257188, \"MacroF1\": 0.706699668165461, \"Memory in Mb\": 3.664814949035645, \"Time in s\": 309.61323300000004 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.719429857464366, \"MicroF1\": 0.719429857464366, \"MacroF1\": 0.7094425115390415, \"Memory in Mb\": 4.463783264160156, \"Time in s\": 329.52005700000007 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251631617113851, \"MicroF1\": 0.725163161711385, \"MacroF1\": 0.7174387625572534, \"Memory in Mb\": 4.938790321350098, \"Time in s\": 349.76822000000004 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7319298245614035, \"MicroF1\": 0.7319298245614035, \"MacroF1\": 0.7244482628352659, \"Memory in Mb\": 5.045901298522949, \"Time in s\": 370.340987 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7335146159075459, \"MicroF1\": 0.7335146159075459, \"MacroF1\": 0.7247675805597543, \"Memory in Mb\": 5.884430885314941, \"Time in s\": 391.25731 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251153592617007, \"MicroF1\": 0.7251153592617007, \"MacroF1\": 0.7184902268106362, \"Memory in Mb\": 6.2875261306762695, \"Time 
in s\": 412.53433 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7204094689699296, \"MicroF1\": 0.7204094689699295, \"MacroF1\": 0.7171509654034274, \"Memory in Mb\": 6.316588401794434, \"Time in s\": 434.180383 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7165941578620261, \"MicroF1\": 0.7165941578620262, \"MacroF1\": 0.7136076251491865, \"Memory in Mb\": 6.364602088928223, \"Time in s\": 456.188448 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7202416918429003, \"MicroF1\": 0.7202416918429003, \"MacroF1\": 0.7179265770125135, \"Memory in Mb\": 6.460197448730469, \"Time in s\": 478.554693 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.721928277483833, \"MicroF1\": 0.7219282774838331, \"MacroF1\": 0.7220156076184944, \"Memory in Mb\": 6.666633605957031, \"Time in s\": 501.28713 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7263880938752146, \"MicroF1\": 0.7263880938752146, \"MacroF1\": 0.7263874723147012, \"Memory in Mb\": 6.882956504821777, \"Time in s\": 524.384863 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7328499721137758, \"MicroF1\": 0.7328499721137758, \"MacroF1\": 0.7320714565315939, \"Memory in Mb\": 6.874361991882324, \"Time in s\": 547.829739 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.734094616639478, \"MicroF1\": 0.734094616639478, \"MacroF1\": 0.7334477172925166, \"Memory in Mb\": 7.857270240783691, \"Time in s\": 571.634536 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7358090185676393, \"MicroF1\": 0.7358090185676393, \"MacroF1\": 0.736235296466255, \"Memory in Mb\": 8.041683197021484, \"Time in s\": 595.832215 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7369238736406007, \"MicroF1\": 0.7369238736406007, \"MacroF1\": 0.7364098924240724, \"Memory in Mb\": 8.212060928344727, \"Time in s\": 620.406916 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7369752149721801, \"MicroF1\": 0.73697521497218, \"MacroF1\": 0.7356260672719533, \"Memory in Mb\": 8.416284561157227, \"Time in s\": 645.365163 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7409787444389521, \"MicroF1\": 0.7409787444389521, \"MacroF1\": 0.7385453010661254, \"Memory in Mb\": 8.869349479675293, \"Time in s\": 670.737914 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7438376027066216, \"MicroF1\": 0.7438376027066217, \"MacroF1\": 0.7418803204845174, \"Memory in Mb\": 
9.001053810119629, \"Time in s\": 696.540108 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7475177304964539, \"MicroF1\": 0.7475177304964539, \"MacroF1\": 0.7450940881618369, \"Memory in Mb\": 9.427652359008787, \"Time in s\": 722.759269 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7482646922720962, \"MicroF1\": 0.7482646922720962, \"MacroF1\": 0.7457425826498583, \"Memory in Mb\": 9.724228858947754, \"Time in s\": 749.427824 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7521522428636158, \"MicroF1\": 0.7521522428636158, \"MacroF1\": 0.7492034954191574, \"Memory in Mb\": 9.71615219116211, \"Time in s\": 776.532359 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7532179316466933, \"MicroF1\": 0.7532179316466933, \"MacroF1\": 0.7508205496072249, \"Memory in Mb\": 10.198495864868164, \"Time in s\": 804.090452 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7546759460635059, \"MicroF1\": 0.754675946063506, \"MacroF1\": 0.7527273841922961, \"Memory in Mb\": 10.425667762756348, \"Time in s\": 832.069921 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6265402843601896, \"MicroF1\": 0.6265402843601896, \"MacroF1\": 0.5882776540607534, \"Memory in Mb\": 10.90817928314209, \"Time in s\": 22.534162 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6570345807674088, \"MicroF1\": 0.6570345807674088, \"MacroF1\": 0.61544126739188, \"Memory in Mb\": 21.709880828857425, \"Time in s\": 65.027277 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6684559520050521, \"MicroF1\": 0.6684559520050521, \"MacroF1\": 0.6242294974630811, \"Memory in Mb\": 28.635205268859863, \"Time in s\": 131.537407 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6810324413923751, \"MicroF1\": 0.6810324413923751, \"MacroF1\": 0.6325456686453049, \"Memory in Mb\": 36.43542194366455, \"Time in s\": 221.773783 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6910399696912294, \"MicroF1\": 0.6910399696912294, \"MacroF1\": 0.6411255615252124, \"Memory in Mb\": 45.614484786987305, \"Time in s\": 335.089181 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6937647987371744, \"MicroF1\": 0.6937647987371744, \"MacroF1\": 0.6440375279924044, \"Memory in Mb\": 53.59738254547119, \"Time in s\": 471.883196 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6988228927073468, \"MicroF1\": 0.6988228927073468, \"MacroF1\": 0.6494865599203364, \"Memory in Mb\": 
66.1818675994873, \"Time in s\": 633.159586 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7001302237480762, \"MicroF1\": 0.7001302237480762, \"MacroF1\": 0.6494906800979877, \"Memory in Mb\": 76.50763607025146, \"Time in s\": 819.592493 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7055666631590024, \"MicroF1\": 0.7055666631590024, \"MacroF1\": 0.6515748182594757, \"Memory in Mb\": 80.03414821624756, \"Time in s\": 1031.228873 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7099157117151246, \"MicroF1\": 0.7099157117151246, \"MacroF1\": 0.6536141909419667, \"Memory in Mb\": 75.23120212554932, \"Time in s\": 1268.49353 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7112354713732243, \"MicroF1\": 0.7112354713732243, \"MacroF1\": 0.6532930257397846, \"Memory in Mb\": 87.85937118530273, \"Time in s\": 1530.837041 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7140715018546286, \"MicroF1\": 0.7140715018546285, \"MacroF1\": 0.6586632134486646, \"Memory in Mb\": 96.90367698669434, \"Time in s\": 1818.32799 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7196765498652291, \"MicroF1\": 0.7196765498652291, \"MacroF1\": 0.7110222921473365, \"Memory in Mb\": 56.69392013549805, \"Time in s\": 2124.564995 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7275248596360685, \"MicroF1\": 0.7275248596360685, \"MacroF1\": 0.7243727970733626, \"Memory in Mb\": 23.290308952331543, \"Time in s\": 2446.995357 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7219521434433992, \"MicroF1\": 0.7219521434433992, \"MacroF1\": 0.7204121258981635, \"Memory in Mb\": 12.419946670532228, \"Time in s\": 2789.3598920000004 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7180822728617934, \"MicroF1\": 0.7180822728617934, \"MacroF1\": 0.7177336146344276, \"Memory in Mb\": 18.459078788757324, \"Time in s\": 3150.9630650000004 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7130521976491561, \"MicroF1\": 0.713052197649156, \"MacroF1\": 0.7136298242976093, \"Memory in Mb\": 32.946556091308594, \"Time in s\": 3531.3081450000004 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7221023833324565, \"MicroF1\": 0.7221023833324565, \"MacroF1\": 0.7193994629254835, \"Memory in Mb\": 14.42181396484375, \"Time in s\": 3928.2542010000006 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7273089767233215, \"MicroF1\": 0.7273089767233214, \"MacroF1\": 0.721146893328104, \"Memory 
in Mb\": 20.33617401123047, \"Time in s\": 4340.108142000001 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7289170888773142, \"MicroF1\": 0.7289170888773142, \"MacroF1\": 0.7201390592471967, \"Memory in Mb\": 29.71843242645264, \"Time in s\": 4775.145968000001 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7305524239007892, \"MicroF1\": 0.7305524239007891, \"MacroF1\": 0.719265816341323, \"Memory in Mb\": 21.72282314300537, \"Time in s\": 5233.162328 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7328569583745856, \"MicroF1\": 0.7328569583745856, \"MacroF1\": 0.7192472788421966, \"Memory in Mb\": 31.907146453857425, \"Time in s\": 5712.0379140000005 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7349610902952196, \"MicroF1\": 0.7349610902952196, \"MacroF1\": 0.7190161489472059, \"Memory in Mb\": 36.71180248260498, \"Time in s\": 6211.046404000001 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7382314643096713, \"MicroF1\": 0.7382314643096713, \"MacroF1\": 0.7202655895968563, \"Memory in Mb\": 44.65183067321777, \"Time in s\": 6729.485654000001 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7397249895829388, \"MicroF1\": 0.7397249895829386, \"MacroF1\": 0.7198095986730461, \"Memory in Mb\": 54.357375144958496, \"Time in s\": 7266.820082000001 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7418685121107267, \"MicroF1\": 0.7418685121107267, \"MacroF1\": 0.7199133187431289, \"Memory in Mb\": 60.99125003814697, \"Time in s\": 7822.667714000001 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7388727157939041, \"MicroF1\": 0.7388727157939041, \"MacroF1\": 0.7182833957431396, \"Memory in Mb\": 26.944812774658203, \"Time in s\": 8398.518895000001 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7376128792234586, \"MicroF1\": 0.7376128792234586, \"MacroF1\": 0.7214769633664444, \"Memory in Mb\": 24.290247917175293, \"Time in s\": 8990.352305 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7372889658100121, \"MicroF1\": 0.7372889658100121, \"MacroF1\": 0.7255972176885724, \"Memory in Mb\": 19.85909652709961, \"Time in s\": 9598.262147 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7371444805707251, \"MicroF1\": 0.737144480570725, \"MacroF1\": 0.7291466686667684, \"Memory in Mb\": 32.85751724243164, \"Time in s\": 10221.399895 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7374064457003208, \"MicroF1\": 0.7374064457003208, 
\"MacroF1\": 0.7322831246511409, \"Memory in Mb\": 38.75182342529297, \"Time in s\": 10860.215509 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7329170489183511, \"MicroF1\": 0.7329170489183511, \"MacroF1\": 0.7291423789419403, \"Memory in Mb\": 76.79454803466797, \"Time in s\": 11517.947008 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7290728039716475, \"MicroF1\": 0.7290728039716475, \"MacroF1\": 0.7252059051088736, \"Memory in Mb\": 37.93787670135498, \"Time in s\": 12198.376918999998 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.726791633011169, \"MicroF1\": 0.7267916330111689, \"MacroF1\": 0.72277521319889, \"Memory in Mb\": 30.2938232421875, \"Time in s\": 12898.613101 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7233150247571634, \"MicroF1\": 0.7233150247571634, \"MacroF1\": 0.7191521630945247, \"Memory in Mb\": 34.07670021057129, \"Time in s\": 13619.767749 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7210837827173484, \"MicroF1\": 0.7210837827173484, \"MacroF1\": 0.7166085958184295, \"Memory in Mb\": 39.338196754455566, \"Time in s\": 14364.864177 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7203040618361445, \"MicroF1\": 0.7203040618361445, \"MacroF1\": 0.7160627724850469, \"Memory in Mb\": 41.774664878845215, \"Time in s\": 15133.386085 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7183193361078576, \"MicroF1\": 0.7183193361078576, \"MacroF1\": 0.7145670483840382, \"Memory in Mb\": 44.649410247802734, \"Time in s\": 15926.835433 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7176990505791224, \"MicroF1\": 0.7176990505791223, \"MacroF1\": 0.7142800617937591, \"Memory in Mb\": 38.580246925354, \"Time in s\": 16742.181512 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7177489997395772, \"MicroF1\": 0.7177489997395772, \"MacroF1\": 0.7147225222929322, \"Memory in Mb\": 44.2959041595459, \"Time in s\": 17577.282826 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7185818223813374, \"MicroF1\": 0.7185818223813374, \"MacroF1\": 0.7159354160738768, \"Memory in Mb\": 44.74843406677246, \"Time in s\": 18431.175396 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7192171540664246, \"MicroF1\": 0.7192171540664247, \"MacroF1\": 0.7168891106233332, \"Memory in Mb\": 50.63485240936279, \"Time in s\": 19303.296412 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7197128196093113, \"MicroF1\": 0.7197128196093113, 
\"MacroF1\": 0.7173999204613543, \"Memory in Mb\": 48.77041816711426, \"Time in s\": 20195.421521 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7207240169597314, \"MicroF1\": 0.7207240169597314, \"MacroF1\": 0.7184187872009821, \"Memory in Mb\": 56.04546070098877, \"Time in s\": 21107.071122 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7212062543403691, \"MicroF1\": 0.7212062543403692, \"MacroF1\": 0.7191280088329424, \"Memory in Mb\": 48.19489002227783, \"Time in s\": 22037.88723 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7252084405558414, \"MicroF1\": 0.7252084405558414, \"MacroF1\": 0.7232782847500743, \"Memory in Mb\": 54.32844257354736, \"Time in s\": 22988.011007 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7291813584251778, \"MicroF1\": 0.7291813584251778, \"MacroF1\": 0.7271951034706091, \"Memory in Mb\": 53.6518030166626, \"Time in s\": 23956.634362 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7326928009154221, \"MicroF1\": 0.7326928009154221, \"MacroF1\": 0.7304439468758875, \"Memory in Mb\": 27.42653465270996, \"Time in s\": 24940.316756 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7367180101656262, \"MicroF1\": 0.7367180101656263, \"MacroF1\": 0.7341247480346391, \"Memory in Mb\": 36.39958953857422, \"Time in s\": 25936.798718 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7395784011060815, \"MicroF1\": 0.7395784011060814, \"MacroF1\": 0.737512125998823, \"Memory in Mb\": 8.341936111450195, \"Time in s\": 26942.262482 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 1.551915168762207, \"Time in s\": 2.576854 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.974233128834356, \"MicroF1\": 0.974233128834356, \"MacroF1\": 0.8747406597440331, \"Memory in Mb\": 4.161267280578613, \"Time in s\": 7.918686 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9672935404742437, \"MicroF1\": 0.9672935404742437, \"MacroF1\": 0.9345378451161834, \"Memory in Mb\": 7.904744148254394, \"Time in s\": 16.17177 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9662783568362968, \"MicroF1\": 0.9662783568362968, \"MacroF1\": 0.920078959712528, \"Memory in Mb\": 12.156608581542969, \"Time in s\": 27.617862 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9632172633643944, \"MicroF1\": 0.9632172633643944, \"MacroF1\": 
0.9392069284616192, \"Memory in Mb\": 18.052184104919437, \"Time in s\": 42.519185 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9591336330200244, \"MicroF1\": 0.9591336330200244, \"MacroF1\": 0.952707267188964, \"Memory in Mb\": 17.44593620300293, \"Time in s\": 61.058317 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9600700525394046, \"MicroF1\": 0.9600700525394046, \"MacroF1\": 0.9487475492194613, \"Memory in Mb\": 23.22895908355713, \"Time in s\": 83.383302 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9589334967821024, \"MicroF1\": 0.9589334967821024, \"MacroF1\": 0.9481804303110768, \"Memory in Mb\": 30.014172554016117, \"Time in s\": 110.001368 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957504767093435, \"MicroF1\": 0.957504767093435, \"MacroF1\": 0.948270905442242, \"Memory in Mb\": 37.68842315673828, \"Time in s\": 141.307336 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9529296396175532, \"MicroF1\": 0.9529296396175532, \"MacroF1\": 0.9350591426916868, \"Memory in Mb\": 43.92499256134033, \"Time in s\": 178.285036 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9558725206151104, \"MicroF1\": 0.9558725206151104, \"MacroF1\": 0.958348874105129, \"Memory in Mb\": 23.460043907165527, \"Time in s\": 220.514164 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957711950970378, \"MicroF1\": 0.957711950970378, \"MacroF1\": 0.9572545884780326, \"Memory in Mb\": 21.815909385681152, \"Time in s\": 267.806579 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9581369036394494, \"MicroF1\": 0.9581369036394494, \"MacroF1\": 0.9564558175945328, \"Memory in Mb\": 29.116984367370605, \"Time in s\": 320.079148 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.959376641568902, \"MicroF1\": 0.959376641568902, \"MacroF1\": 0.9590743474150508, \"Memory in Mb\": 34.215229988098145, \"Time in s\": 377.359359 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9576728223565942, \"MicroF1\": 0.9576728223565942, \"MacroF1\": 0.9540539138154064, \"Memory in Mb\": 42.24546051025391, \"Time in s\": 440.303036 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957254481385016, \"MicroF1\": 0.9572544813850162, \"MacroF1\": 0.9569914463415944, \"Memory in Mb\": 19.71925640106201, \"Time in s\": 508.401183 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9586157173756308, \"MicroF1\": 0.9586157173756308, \"MacroF1\": 0.9593505106134974, 
\"Memory in Mb\": 22.396859169006348, \"Time in s\": 580.778744 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9592809478414815, \"MicroF1\": 0.9592809478414815, \"MacroF1\": 0.9593459120031488, \"Memory in Mb\": 26.322874069213867, \"Time in s\": 657.980924 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9600051606244356, \"MicroF1\": 0.9600051606244356, \"MacroF1\": 0.9601169971762602, \"Memory in Mb\": 30.259758949279785, \"Time in s\": 740.2744439999999 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.95747027822037, \"MicroF1\": 0.95747027822037, \"MacroF1\": 0.9549133730963548, \"Memory in Mb\": 37.68336868286133, \"Time in s\": 827.6717389999999 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9564608380996849, \"MicroF1\": 0.9564608380996849, \"MacroF1\": 0.9560990529914856, \"Memory in Mb\": 43.737112045288086, \"Time in s\": 921.0092 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9568802228412256, \"MicroF1\": 0.9568802228412256, \"MacroF1\": 0.9569984740230398, \"Memory in Mb\": 33.59728527069092, \"Time in s\": 1020.6233179999998 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9575828626238942, \"MicroF1\": 0.9575828626238942, \"MacroF1\": 0.9578510301970172, \"Memory in Mb\": 34.01332950592041, \"Time in s\": 1125.214274 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9576141354304974, \"MicroF1\": 0.9576141354304974, \"MacroF1\": 0.95758927245962, \"Memory in Mb\": 40.18074893951416, \"Time in s\": 1234.457491 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9558780272575744, \"MicroF1\": 0.9558780272575744, \"MacroF1\": 0.954787839223492, \"Memory in Mb\": 48.97087860107422, \"Time in s\": 1349.23618 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9524842085415292, \"MicroF1\": 0.9524842085415292, \"MacroF1\": 0.9506853107984292, \"Memory in Mb\": 30.6993989944458, \"Time in s\": 1470.124694 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9539718565592374, \"MicroF1\": 0.9539718565592374, \"MacroF1\": 0.9545620457235888, \"Memory in Mb\": 26.549206733703613, \"Time in s\": 1595.074551 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9543902652543116, \"MicroF1\": 0.9543902652543116, \"MacroF1\": 0.9545363240408884, \"Memory in Mb\": 33.6107063293457, \"Time in s\": 1724.543611 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9541881497760122, \"MicroF1\": 0.9541881497760122, \"MacroF1\": 
0.954140840579052, \"Memory in Mb\": 25.182985305786133, \"Time in s\": 1859.013318 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955061688046409, \"MicroF1\": 0.955061688046409, \"MacroF1\": 0.9554321262858616, \"Memory in Mb\": 27.34038543701172, \"Time in s\": 1997.870855 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546928125247094, \"MicroF1\": 0.9546928125247094, \"MacroF1\": 0.9546233453975912, \"Memory in Mb\": 35.30395698547363, \"Time in s\": 2141.6119080000003 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.953887399463807, \"MicroF1\": 0.953887399463807, \"MacroF1\": 0.9537532269202632, \"Memory in Mb\": 33.51621055603027, \"Time in s\": 2290.943904 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9540221347396568, \"MicroF1\": 0.9540221347396568, \"MacroF1\": 0.954138309472004, \"Memory in Mb\": 33.38596153259277, \"Time in s\": 2445.0119170000003 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546535938288516, \"MicroF1\": 0.9546535938288516, \"MacroF1\": 0.9549190485054234, \"Memory in Mb\": 31.36033725738525, \"Time in s\": 2603.685647 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9534281112122698, \"MicroF1\": 0.9534281112122698, \"MacroF1\": 0.9532093226981456, \"Memory in Mb\": 38.61776542663574, \"Time in s\": 2767.1788560000005 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9540409886294, \"MicroF1\": 0.9540409886294, \"MacroF1\": 0.9542688403803362, \"Memory in Mb\": 42.8822660446167, \"Time in s\": 2935.9642900000003 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9547532295462072, \"MicroF1\": 0.9547532295462072, \"MacroF1\": 0.9549723528375392, \"Memory in Mb\": 41.949758529663086, \"Time in s\": 3110.4045410000003 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9549764561697736, \"MicroF1\": 0.9549764561697736, \"MacroF1\": 0.9551012466300322, \"Memory in Mb\": 36.29027271270752, \"Time in s\": 3290.0043080000005 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9551253849538056, \"MicroF1\": 0.9551253849538056, \"MacroF1\": 0.955237279627336, \"Memory in Mb\": 33.26945877075195, \"Time in s\": 3474.8467850000006 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9555119799007292, \"MicroF1\": 0.9555119799007292, \"MacroF1\": 0.9556369370454034, \"Memory in Mb\": 38.47606945037842, \"Time in s\": 3664.7140100000006 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.954923178095295, \"MicroF1\": 0.954923178095295, \"MacroF1\": 0.9549151106032768, \"Memory in Mb\": 38.78229522705078, \"Time in s\": 3859.856695 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955587977823169, \"MicroF1\": 0.955587977823169, \"MacroF1\": 0.9557184838324558, \"Memory in Mb\": 44.56228828430176, \"Time in s\": 4060.267757000001 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9550817990081514, \"MicroF1\": 0.9550817990081514, \"MacroF1\": 0.9550944582439086, \"Memory in Mb\": 49.72221755981445, \"Time in s\": 4266.497535 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9547657512116317, \"MicroF1\": 0.9547657512116317, \"MacroF1\": 0.9547923955213532, \"Memory in Mb\": 44.72002029418945, \"Time in s\": 4478.609232000001 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9553897271093196, \"MicroF1\": 0.9553897271093196, \"MacroF1\": 0.955476322048541, \"Memory in Mb\": 52.00297737121582, \"Time in s\": 4696.983244000001 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955507006980338, \"MicroF1\": 0.955507006980338, \"MacroF1\": 0.9555572955831596, \"Memory in Mb\": 59.27475929260254, \"Time in s\": 4921.837298000001 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9548891786179922, \"MicroF1\": 0.9548891786179922, \"MacroF1\": 0.9549038695373788, \"Memory in Mb\": 71.70181655883789, \"Time in s\": 5153.196879000001 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.954858806107338, \"MicroF1\": 0.954858806107338, \"MacroF1\": 0.9548865417655428, \"Memory in Mb\": 78.58561515808105, \"Time in s\": 5390.578061000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9539292681706768, \"MicroF1\": 0.9539292681706768, \"MacroF1\": 0.9539347026376764, \"Memory in Mb\": 85.24763870239258, \"Time in s\": 5634.910465000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9532330016177264, \"MicroF1\": 0.9532330016177264, \"MacroF1\": 0.9532392337717848, \"Memory in Mb\": 74.55205345153809, \"Time in s\": 5886.477404000001 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5555555555555556, \"MicroF1\": 0.5555555555555556, \"MacroF1\": 0.4458032432860809, \"Memory in Mb\": 0.061410903930664, \"Time in s\": 0.019654 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6483516483516484, \"MicroF1\": 0.6483516483516484, \"MacroF1\": 0.646491610589355, \"Memory in Mb\": 0.1154079437255859, \"Time in s\": 0.0623149999999999 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.708029197080292, \"MicroF1\": 0.708029197080292, \"MacroF1\": 0.7216654146545566, \"Memory in Mb\": 0.1258535385131836, \"Time in s\": 0.135768 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431693989071039, \"MicroF1\": 0.743169398907104, \"MacroF1\": 0.7576794034998369, \"Memory in Mb\": 0.1263303756713867, \"Time in s\": 0.24048 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7641921397379913, \"MicroF1\": 0.7641921397379913, \"MacroF1\": 0.7751275973499576, \"Memory in Mb\": 0.126317024230957, \"Time in s\": 0.376459 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7672727272727272, \"MicroF1\": 0.7672727272727272, \"MacroF1\": 0.7799448750812884, \"Memory in Mb\": 0.1262655258178711, \"Time in s\": 0.543817 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7757009345794392, \"MicroF1\": 0.7757009345794392, \"MacroF1\": 0.781311030606134, \"Memory in Mb\": 0.1267433166503906, \"Time in s\": 0.742198 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.782016348773842, \"MicroF1\": 0.782016348773842, \"MacroF1\": 0.7830988277979799, \"Memory in Mb\": 0.1267471313476562, \"Time in s\": 0.972069 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7893462469733656, \"MicroF1\": 0.7893462469733655, \"MacroF1\": 0.7891834545778567, \"Memory in Mb\": 0.1262693405151367, \"Time in s\": 1.233319 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7799564270152506, \"MicroF1\": 0.7799564270152506, \"MacroF1\": 0.778762654261754, \"Memory in Mb\": 0.1262626647949218, \"Time in s\": 1.526064 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7841584158415842, \"MicroF1\": 0.7841584158415842, \"MacroF1\": 0.7830263284725031, \"Memory in Mb\": 0.126779556274414, \"Time in s\": 1.849975 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7840290381125227, \"MicroF1\": 0.7840290381125228, \"MacroF1\": 0.7833214841514466, \"Memory in Mb\": 0.1267738342285156, \"Time in s\": 2.2053070000000004 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7839195979899497, \"MicroF1\": 0.7839195979899497, \"MacroF1\": 0.7851401823229054, \"Memory in Mb\": 0.1262836456298828, \"Time in s\": 2.5919950000000003 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7884914463452566, \"MicroF1\": 0.7884914463452566, \"MacroF1\": 0.790931132142264, \"Memory in Mb\": 0.1262893676757812, \"Time in s\": 3.010019 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.795355587808418, \"MicroF1\": 0.795355587808418, \"MacroF1\": 0.7973717331367783, \"Memory in Mb\": 0.1267967224121093, \"Time in s\": 3.459217 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7918367346938775, \"MicroF1\": 0.7918367346938775, \"MacroF1\": 0.79371924750244, \"Memory in Mb\": 0.1262922286987304, \"Time in s\": 3.93961 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8015364916773368, \"MicroF1\": 0.8015364916773368, \"MacroF1\": 0.8027236936866887, \"Memory in Mb\": 0.1262769699096679, \"Time in s\": 4.451398 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7980652962515115, \"MicroF1\": 0.7980652962515115, \"MacroF1\": 0.8001612113332863, \"Memory in Mb\": 0.1267776489257812, \"Time in s\": 4.994585 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8041237113402062, \"MicroF1\": 0.8041237113402062, \"MacroF1\": 0.8058476562214167, \"Memory in Mb\": 0.1267652511596679, \"Time in s\": 5.568929 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8063112078346029, \"MicroF1\": 0.8063112078346029, \"MacroF1\": 0.8071524109530731, \"Memory in Mb\": 0.1262378692626953, \"Time in s\": 6.174556 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8072538860103627, \"MicroF1\": 0.8072538860103627, \"MacroF1\": 0.8069383576906736, \"Memory in Mb\": 0.1262502670288086, \"Time in s\": 6.811568 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8120672601384767, \"MicroF1\": 0.8120672601384767, \"MacroF1\": 0.8103691514865562, \"Memory in Mb\": 0.1267623901367187, \"Time in s\": 7.479958 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8117313150425733, \"MicroF1\": 0.8117313150425733, \"MacroF1\": 0.8093057999862455, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 8.179363 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8105167724388033, \"MicroF1\": 0.8105167724388033, \"MacroF1\": 0.8087453181575575, \"Memory in Mb\": 0.126260757446289, \"Time in s\": 8.909729 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8120104438642297, \"MicroF1\": 0.8120104438642298, \"MacroF1\": 0.8093458779132273, \"Memory in Mb\": 0.1267480850219726, \"Time in s\": 9.671618 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8125523012552301, \"MicroF1\": 0.8125523012552303, \"MacroF1\": 0.8098995946687924, \"Memory in Mb\": 0.1267566680908203, \"Time in s\": 10.464471 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8170829975825947, \"MicroF1\": 
0.8170829975825946, \"MacroF1\": 0.8146737825459542, \"Memory in Mb\": 0.1263046264648437, \"Time in s\": 11.288498 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8174048174048174, \"MicroF1\": 0.8174048174048174, \"MacroF1\": 0.8149699191034137, \"Memory in Mb\": 0.1263151168823242, \"Time in s\": 12.143769999999998 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8169542385596399, \"MicroF1\": 0.8169542385596399, \"MacroF1\": 0.8144172630221828, \"Memory in Mb\": 0.1267910003662109, \"Time in s\": 13.030243999999998 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8165337200870196, \"MicroF1\": 0.8165337200870196, \"MacroF1\": 0.8142638589810781, \"Memory in Mb\": 0.1267881393432617, \"Time in s\": 13.947682999999998 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8210526315789474, \"MicroF1\": 0.8210526315789475, \"MacroF1\": 0.8177443463463022, \"Memory in Mb\": 0.1262807846069336, \"Time in s\": 14.896382999999998 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.822569680489463, \"MicroF1\": 0.822569680489463, \"MacroF1\": 0.8180682540474884, \"Memory in Mb\": 0.1267719268798828, \"Time in s\": 15.876335 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8233355306526038, \"MicroF1\": 0.8233355306526038, \"MacroF1\": 0.8183049909694801, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 16.887748 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8227767114523352, \"MicroF1\": 0.8227767114523352, \"MacroF1\": 0.8180063024943973, \"Memory in Mb\": 0.1262645721435547, \"Time in s\": 17.930424 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8228713486637663, \"MicroF1\": 0.8228713486637663, \"MacroF1\": 0.818440484251979, \"Memory in Mb\": 0.1262655258178711, \"Time in s\": 19.004458 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.824773413897281, \"MicroF1\": 0.824773413897281, \"MacroF1\": 0.8207684581521858, \"Memory in Mb\": 0.1267824172973632, \"Time in s\": 20.10985 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.824808935920047, \"MicroF1\": 0.824808935920047, \"MacroF1\": 0.8222541912553749, \"Memory in Mb\": 0.1268024444580078, \"Time in s\": 21.24685 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8259874069834001, \"MicroF1\": 0.8259874069834001, \"MacroF1\": 0.8228660744170171, \"Memory in Mb\": 0.1263065338134765, \"Time in s\": 22.415483 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.826547685443391, \"MicroF1\": 
0.826547685443391, \"MacroF1\": 0.8226613560637924, \"Memory in Mb\": 0.126774787902832, \"Time in s\": 23.615758 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8254486133768353, \"MicroF1\": 0.8254486133768353, \"MacroF1\": 0.8217381124058762, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 24.847279 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8249336870026526, \"MicroF1\": 0.8249336870026526, \"MacroF1\": 0.8216008133499116, \"Memory in Mb\": 0.1262845993041992, \"Time in s\": 26.110083 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8234075608493009, \"MicroF1\": 0.8234075608493009, \"MacroF1\": 0.8193527544316537, \"Memory in Mb\": 0.1262779235839843, \"Time in s\": 27.403915 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8234699038947901, \"MicroF1\": 0.8234699038947901, \"MacroF1\": 0.8195124114516217, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 28.728915 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8220464656450815, \"MicroF1\": 0.8220464656450815, \"MacroF1\": 0.8172381305352, \"Memory in Mb\": 0.126774787902832, \"Time in s\": 30.084907 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8206863218946351, \"MicroF1\": 0.8206863218946351, \"MacroF1\": 0.8164336862763343, \"Memory in Mb\": 0.1262893676757812, \"Time in s\": 31.472199 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8217494089834515, \"MicroF1\": 0.8217494089834515, \"MacroF1\": 0.8168455585843762, \"Memory in Mb\": 0.1262645721435547, \"Time in s\": 32.891028 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8204534937528922, \"MicroF1\": 0.8204534937528921, \"MacroF1\": 0.8154843900985335, \"Memory in Mb\": 0.1267728805541992, \"Time in s\": 34.340933 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.822383325781604, \"MicroF1\": 0.822383325781604, \"MacroF1\": 0.8171788245797035, \"Memory in Mb\": 0.1262683868408203, \"Time in s\": 35.822171 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.821127385707945, \"MicroF1\": 0.821127385707945, \"MacroF1\": 0.8170261701336431, \"Memory in Mb\": 0.126255989074707, \"Time in s\": 37.335093 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8199217050891692, \"MicroF1\": 0.8199217050891693, \"MacroF1\": 0.8158945802523674, \"Memory in Mb\": 0.1267604827880859, \"Time in s\": 38.879423 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6322274881516587, \"MicroF1\": 0.6322274881516587, \"MacroF1\": 
0.5639948035153092, \"Memory in Mb\": 0.2159481048583984, \"Time in s\": 1.014879 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.636191378493605, \"MicroF1\": 0.636191378493605, \"MacroF1\": 0.5686546251961576, \"Memory in Mb\": 0.2164621353149414, \"Time in s\": 3.11274 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6274076413009156, \"MicroF1\": 0.6274076413009156, \"MacroF1\": 0.5664829980315041, \"Memory in Mb\": 0.2159862518310547, \"Time in s\": 6.352824 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6317783566185177, \"MicroF1\": 0.6317783566185177, \"MacroF1\": 0.5676004628647836, \"Memory in Mb\": 0.216461181640625, \"Time in s\": 10.71472 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6277704110627013, \"MicroF1\": 0.6277704110627013, \"MacroF1\": 0.5651907052085646, \"Memory in Mb\": 0.215977668762207, \"Time in s\": 16.153805 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6244672454617206, \"MicroF1\": 0.6244672454617206, \"MacroF1\": 0.5642758642399058, \"Memory in Mb\": 0.2164630889892578, \"Time in s\": 22.622426 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.621160871329996, \"MicroF1\": 0.621160871329996, \"MacroF1\": 0.5621999618118433, \"Memory in Mb\": 0.2159862518310547, \"Time in s\": 30.095944 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6183260329110927, \"MicroF1\": 0.6183260329110927, \"MacroF1\": 0.560545956984929, \"Memory in Mb\": 0.2164678573608398, \"Time in s\": 38.53514199999999 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6195938124802693, \"MicroF1\": 0.6195938124802693, \"MacroF1\": 0.5612689785887882, \"Memory in Mb\": 0.2159566879272461, \"Time in s\": 47.930055 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6209868358746093, \"MicroF1\": 0.6209868358746093, \"MacroF1\": 0.5626902992589761, \"Memory in Mb\": 0.2164936065673828, \"Time in s\": 58.279528 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6196297890658631, \"MicroF1\": 0.6196297890658631, \"MacroF1\": 0.5618958864151227, \"Memory in Mb\": 0.215947151184082, \"Time in s\": 69.583507 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6235498382132428, \"MicroF1\": 0.6235498382132428, \"MacroF1\": 0.577401509815314, \"Memory in Mb\": 0.2165937423706054, \"Time in s\": 81.841211 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6524368033801996, \"MicroF1\": 0.6524368033801996, \"MacroF1\": 0.656066758247117, \"Memory in Mb\": 0.2161521911621093, \"Time in s\": 95.05061 }, { \"step\": 
14784, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6639383075153893, \"MicroF1\": 0.6639383075153893, \"MacroF1\": 0.6656513873636037, \"Memory in Mb\": 0.2165174484252929, \"Time in s\": 109.208982 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6599532798787803, \"MicroF1\": 0.6599532798787803, \"MacroF1\": 0.6660828271082423, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 124.323768 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6583012725658479, \"MicroF1\": 0.6583012725658479, \"MacroF1\": 0.6678320995738946, \"Memory in Mb\": 0.2164812088012695, \"Time in s\": 140.393226 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6558966074313409, \"MicroF1\": 0.6558966074313409, \"MacroF1\": 0.6676009154715022, \"Memory in Mb\": 0.2160320281982422, \"Time in s\": 157.416689 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6731730415110223, \"MicroF1\": 0.6731730415110223, \"MacroF1\": 0.6774302820037228, \"Memory in Mb\": 0.2165126800537109, \"Time in s\": 175.38966399999998 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6798086029008623, \"MicroF1\": 0.6798086029008623, \"MacroF1\": 0.6780616401383449, \"Memory in Mb\": 0.2158927917480468, \"Time in s\": 194.316978 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.680382593872816, \"MicroF1\": 0.680382593872816, \"MacroF1\": 0.6752117016598617, \"Memory in Mb\": 0.2164011001586914, \"Time in s\": 214.200546 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6805862457722661, \"MicroF1\": 0.6805862457722661, \"MacroF1\": 0.6722568877045599, \"Memory in Mb\": 0.2158823013305664, \"Time in s\": 235.037825 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6813740260858336, \"MicroF1\": 0.6813740260858336, \"MacroF1\": 0.6702824994179433, \"Memory in Mb\": 0.2163906097412109, \"Time in s\": 256.831775 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6815992094536172, \"MicroF1\": 0.6815992094536172, \"MacroF1\": 0.6677450869096582, \"Memory in Mb\": 0.2159175872802734, \"Time in s\": 279.574148 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6821212958213313, \"MicroF1\": 0.6821212958213313, \"MacroF1\": 0.6660355323582295, \"Memory in Mb\": 0.2163667678833007, \"Time in s\": 303.273869 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6831319368157884, \"MicroF1\": 0.6831319368157884, \"MacroF1\": 0.6646803813034555, \"Memory in Mb\": 0.2159061431884765, \"Time in s\": 327.922368 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6836277545073757, \"MicroF1\": 0.6836277545073757, \"MacroF1\": 0.6627124931293528, \"Memory in Mb\": 0.2164020538330078, \"Time in s\": 353.52686 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6834905825821612, \"MicroF1\": 0.6834905825821612, \"MacroF1\": 0.664548122616301, \"Memory in Mb\": 0.2161540985107422, \"Time in s\": 380.080866 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814692055331958, \"MicroF1\": 0.6814692055331958, \"MacroF1\": 0.6671975305669872, \"Memory in Mb\": 0.2166757583618164, \"Time in s\": 407.595828 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6796525487378767, \"MicroF1\": 0.6796525487378767, \"MacroF1\": 0.669471411791397, \"Memory in Mb\": 0.2161798477172851, \"Time in s\": 436.062936 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6779570062186306, \"MicroF1\": 0.6779570062186306, \"MacroF1\": 0.6711290718417154, \"Memory in Mb\": 0.216679573059082, \"Time in s\": 465.4881830000001 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6768901787078051, \"MicroF1\": 0.6768901787078051, \"MacroF1\": 0.6727094382078547, \"Memory in Mb\": 0.2161359786987304, \"Time in s\": 495.86473400000006 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6734337545500281, \"MicroF1\": 0.6734337545500281, \"MacroF1\": 0.6702378074852682, \"Memory in Mb\": 0.2164754867553711, \"Time in s\": 527.2018280000001 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6690676385341636, \"MicroF1\": 0.6690676385341636, \"MacroF1\": 0.6661382581729155, \"Memory in Mb\": 0.215947151184082, \"Time in s\": 559.4897450000001 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6663510013090828, \"MicroF1\": 0.6663510013090828, \"MacroF1\": 0.6633778558128317, \"Memory in Mb\": 0.2165002822875976, \"Time in s\": 592.7366190000001 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.662409697232068, \"MicroF1\": 0.662409697232068, \"MacroF1\": 0.6597878724618786, \"Memory in Mb\": 0.215972900390625, \"Time in s\": 626.9366260000002 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6594239116138366, \"MicroF1\": 0.6594239116138366, \"MacroF1\": 0.6567102170776443, \"Memory in Mb\": 0.2164802551269531, \"Time in s\": 662.1000680000002 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.662409459701569, \"MicroF1\": 0.662409459701569, \"MacroF1\": 0.6591983036871739, \"Memory in Mb\": 0.2159795761108398, \"Time in s\": 698.2204010000002 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6615495800832357, \"MicroF1\": 0.6615495800832357, \"MacroF1\": 0.658372148729009, \"Memory in Mb\": 0.2165098190307617, \"Time in s\": 735.3021140000002 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6616079450258602, \"MicroF1\": 0.6616079450258602, \"MacroF1\": 0.6583203582230679, \"Memory in Mb\": 0.2160120010375976, \"Time in s\": 773.3352610000002 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6620895381045953, \"MicroF1\": 0.6620895381045953, \"MacroF1\": 0.6586855795305535, \"Memory in Mb\": 0.216496467590332, \"Time in s\": 812.3294790000002 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6626862224275321, \"MicroF1\": 0.6626862224275321, \"MacroF1\": 0.6591267371039767, \"Memory in Mb\": 0.216012954711914, \"Time in s\": 852.2739140000002 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6625104281752384, \"MicroF1\": 0.6625104281752384, \"MacroF1\": 0.6587853710847982, \"Memory in Mb\": 0.2164974212646484, \"Time in s\": 893.1794370000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6629374325544519, \"MicroF1\": 0.6629374325544519, \"MacroF1\": 0.6587077344895959, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 935.0370030000004 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6634311172330671, \"MicroF1\": 0.6634311172330671, \"MacroF1\": 0.6587873315408634, \"Memory in Mb\": 0.2165002822875976, \"Time in s\": 977.8531990000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.666217723436941, \"MicroF1\": 0.666217723436941, \"MacroF1\": 0.6621071051846, \"Memory in Mb\": 0.2159204483032226, \"Time in s\": 1021.6188110000004 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6698507462686567, \"MicroF1\": 0.6698507462686567, \"MacroF1\": 0.6663907774790556, \"Memory in Mb\": 0.2164478302001953, \"Time in s\": 1066.3421490000003 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6739940762829683, \"MicroF1\": 0.6739940762829683, \"MacroF1\": 0.6709516060662618, \"Memory in Mb\": 0.2159433364868164, \"Time in s\": 1112.0155100000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6774715410262987, \"MicroF1\": 0.6774715410262987, \"MacroF1\": 0.6745572423992897, \"Memory in Mb\": 0.2164840698242187, \"Time in s\": 1158.6520200000002 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814834856888855, \"MicroF1\": 0.6814834856888855, \"MacroF1\": 0.6786206144243011, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 1206.2391940000002 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6865470936949564, \"MicroF1\": 0.6865470936949564, \"MacroF1\": 0.6836613373539585, \"Memory in Mb\": 0.2166557312011718, \"Time in s\": 1254.784849 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 0.2092580795288086, \"Time in s\": 0.626636 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828220858895704, \"MicroF1\": 0.9828220858895704, \"MacroF1\": 0.9550926410288756, \"Memory in Mb\": 0.2098121643066406, \"Time in s\": 1.943188 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852820932134096, \"MicroF1\": 0.9852820932134096, \"MacroF1\": 0.9672695079711996, \"Memory in Mb\": 0.2093591690063476, \"Time in s\": 3.654696 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840588595953402, \"MicroF1\": 0.9840588595953402, \"MacroF1\": 0.9604409213604836, \"Memory in Mb\": 0.2098979949951172, \"Time in s\": 5.778702 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984796468857283, \"MicroF1\": 0.984796468857283, \"MacroF1\": 0.9791423790442798, \"Memory in Mb\": 0.2104520797729492, \"Time in s\": 8.327232 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861054352268084, \"MicroF1\": 0.9861054352268084, \"MacroF1\": 0.9837809767868474, \"Memory in Mb\": 0.2099990844726562, \"Time in s\": 11.304767 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9859894921190894, \"MicroF1\": 0.9859894921190894, \"MacroF1\": 0.9813641447908844, \"Memory in Mb\": 0.2105531692504882, \"Time in s\": 14.706237 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9871284094391664, \"MicroF1\": 0.9871284094391664, \"MacroF1\": 0.9868437405314092, \"Memory in Mb\": 0.2106037139892578, \"Time in s\": 18.525325 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9880141650776356, \"MicroF1\": 0.9880141650776356, \"MacroF1\": 0.9878382173613446, \"Memory in Mb\": 0.2101507186889648, \"Time in s\": 22.76123 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9877420936504046, \"MicroF1\": 0.9877420936504046, \"MacroF1\": 0.9857777629944036, \"Memory in Mb\": 0.2107048034667968, \"Time in s\": 27.408752 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9881880989525296, \"MicroF1\": 0.9881880989525296, \"MacroF1\": 0.9878235870948694, \"Memory in Mb\": 0.2102518081665039, \"Time in s\": 32.463238 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9885597548518896, 
\"MicroF1\": 0.9885597548518896, \"MacroF1\": 0.9882962361329112, \"Memory in Mb\": 0.2103023529052734, \"Time in s\": 37.912156 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9867999245709976, \"MicroF1\": 0.9867999245709976, \"MacroF1\": 0.9836140972543967, \"Memory in Mb\": 0.210906982421875, \"Time in s\": 43.748231 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9873927508317284, \"MicroF1\": 0.9873927508317284, \"MacroF1\": 0.9875632488318824, \"Memory in Mb\": 0.210453987121582, \"Time in s\": 49.983611 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9872528190880864, \"MicroF1\": 0.9872528190880864, \"MacroF1\": 0.986679154193125, \"Memory in Mb\": 0.211008071899414, \"Time in s\": 56.586139 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.987130381492263, \"MicroF1\": 0.987130381492263, \"MacroF1\": 0.9866769113371192, \"Memory in Mb\": 0.2110586166381836, \"Time in s\": 63.546493 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9875991348233598, \"MicroF1\": 0.9875991348233598, \"MacroF1\": 0.9877805463370743, \"Memory in Mb\": 0.2106056213378906, \"Time in s\": 70.86485 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.986926324390576, \"MicroF1\": 0.986926324390576, \"MacroF1\": 0.9861386596476128, \"Memory in Mb\": 0.2126245498657226, \"Time in s\": 78.54037100000001 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9865823764675524, \"MicroF1\": 0.9865823764675524, \"MacroF1\": 0.986151116916088, \"Memory in Mb\": 0.2121715545654297, \"Time in s\": 86.582909 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861502635126854, \"MicroF1\": 0.9861502635126854, \"MacroF1\": 0.9857089041873668, \"Memory in Mb\": 0.2122220993041992, \"Time in s\": 94.988236 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9863429438543247, \"MicroF1\": 0.9863429438543247, \"MacroF1\": 0.986382977302644, \"Memory in Mb\": 0.2127761840820312, \"Time in s\": 103.754213 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9865181058495822, \"MicroF1\": 0.9865181058495822, \"MacroF1\": 0.9865643235024212, \"Memory in Mb\": 0.2123231887817382, \"Time in s\": 112.879589 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9858254289672812, \"MicroF1\": 0.9858254289672812, \"MacroF1\": 0.9853734936692788, \"Memory in Mb\": 0.2128772735595703, \"Time in s\": 122.365291 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857011541211316, \"MicroF1\": 0.9857011541211316, \"MacroF1\": 0.9856081881161904, \"Memory 
in Mb\": 0.2129278182983398, \"Time in s\": 132.212374 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9860770663790568, \"MicroF1\": 0.9860770663790568, \"MacroF1\": 0.9862471716083434, \"Memory in Mb\": 0.2124748229980468, \"Time in s\": 142.422221 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.98538700857924, \"MicroF1\": 0.98538700857924, \"MacroF1\": 0.9850628829106896, \"Memory in Mb\": 0.2130289077758789, \"Time in s\": 152.99283200000002 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9855651384475717, \"MicroF1\": 0.9855651384475717, \"MacroF1\": 0.9856470830770891, \"Memory in Mb\": 0.2125759124755859, \"Time in s\": 163.926329 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857305436400244, \"MicroF1\": 0.9857305436400244, \"MacroF1\": 0.9858087969497248, \"Memory in Mb\": 0.2126264572143554, \"Time in s\": 175.222393 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9858845406136422, \"MicroF1\": 0.9858845406136422, \"MacroF1\": 0.9859589489459036, \"Memory in Mb\": 0.2131805419921875, \"Time in s\": 186.880683 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861099763052537, \"MicroF1\": 0.9861099763052537, \"MacroF1\": 0.9862068987479334, \"Memory in Mb\": 0.2127275466918945, \"Time in s\": 198.905241 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.986241796473472, \"MicroF1\": 0.986241796473472, \"MacroF1\": 0.9863073128720756, \"Memory in Mb\": 0.2132816314697265, \"Time in s\": 211.292323 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.985905783224818, \"MicroF1\": 0.985905783224818, \"MacroF1\": 0.9858386074980298, \"Memory in Mb\": 0.2133321762084961, \"Time in s\": 224.041487 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857386912278095, \"MicroF1\": 0.9857386912278095, \"MacroF1\": 0.985725098817589, \"Memory in Mb\": 0.2128791809082031, \"Time in s\": 237.153453 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.985725614591594, \"MicroF1\": 0.985725614591594, \"MacroF1\": 0.9857526199764752, \"Memory in Mb\": 0.2134332656860351, \"Time in s\": 250.627393 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9845927585965404, \"MicroF1\": 0.9845927585965404, \"MacroF1\": 0.9843691165759658, \"Memory in Mb\": 0.2129802703857422, \"Time in s\": 264.463365 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9848845918158916, \"MicroF1\": 0.9848845918158916, \"MacroF1\": 0.9849709956409892, \"Memory in Mb\": 0.2130308151245117, \"Time in s\": 
278.661824 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9851606492215964, \"MicroF1\": 0.9851606492215964, \"MacroF1\": 0.9852374033885688, \"Memory in Mb\": 0.2135848999023437, \"Time in s\": 293.222186 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843901180416692, \"MicroF1\": 0.9843901180416692, \"MacroF1\": 0.9842921251481088, \"Memory in Mb\": 0.2131319046020507, \"Time in s\": 308.144297 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840362013701212, \"MicroF1\": 0.9840362013701212, \"MacroF1\": 0.9840127534225096, \"Memory in Mb\": 0.2136859893798828, \"Time in s\": 323.428421 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984067651204118, \"MicroF1\": 0.984067651204118, \"MacroF1\": 0.98409717640125, \"Memory in Mb\": 0.2137365341186523, \"Time in s\": 339.073895 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9838584324744424, \"MicroF1\": 0.9838584324744424, \"MacroF1\": 0.9838587519327452, \"Memory in Mb\": 0.2132835388183593, \"Time in s\": 355.082258 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840676976947768, \"MicroF1\": 0.9840676976947768, \"MacroF1\": 0.9841085979018744, \"Memory in Mb\": 0.2138376235961914, \"Time in s\": 371.45447 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840962207148152, \"MicroF1\": 0.9840962207148152, \"MacroF1\": 0.9841170088782344, \"Memory in Mb\": 0.2133846282958984, \"Time in s\": 388.187765 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840120327558354, \"MicroF1\": 0.9840120327558354, \"MacroF1\": 0.98402212072501, \"Memory in Mb\": 0.2134351730346679, \"Time in s\": 405.281273 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9842039326760716, \"MicroF1\": 0.9842039326760716, \"MacroF1\": 0.9842275892846344, \"Memory in Mb\": 0.2139892578125, \"Time in s\": 422.735746 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984280918633772, \"MicroF1\": 0.984280918633772, \"MacroF1\": 0.9842944297848302, \"Memory in Mb\": 0.213536262512207, \"Time in s\": 440.550971 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843024771838332, \"MicroF1\": 0.9843024771838332, \"MacroF1\": 0.9843104669951572, \"Memory in Mb\": 0.214090347290039, \"Time in s\": 458.72564299999993 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843742021140786, \"MicroF1\": 0.9843742021140786, \"MacroF1\": 0.9843801024949196, \"Memory in Mb\": 0.2141408920288086, \"Time in s\": 477.2614029999999 }, { \"step\": 19992, \"track\": 
\"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9845430443699664, \"MicroF1\": 0.9845430443699664, \"MacroF1\": 0.984546236206973, \"Memory in Mb\": 0.2136878967285156, \"Time in s\": 496.1581819999999 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984509044561008, \"MicroF1\": 0.984509044561008, \"MacroF1\": 0.984507607652182, \"Memory in Mb\": 0.2142419815063476, \"Time in s\": 515.4151939999999 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2457649726557289, \"Memory in Mb\": 4.137397766113281, \"Time in s\": 1.064188 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4835164835164835, \"MicroF1\": 0.4835164835164835, \"MacroF1\": 0.4934752395581889, \"Memory in Mb\": 4.140613555908203, \"Time in s\": 2.631663 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5528821792646677, \"Memory in Mb\": 4.140277862548828, \"Time in s\": 4.836076 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5956284153005464, \"MicroF1\": 0.5956284153005464, \"MacroF1\": 0.614143164890895, \"Memory in Mb\": 4.141227722167969, \"Time in s\": 7.573955 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.62882096069869, \"MicroF1\": 0.62882096069869, \"MacroF1\": 0.6441389332893815, \"Memory in Mb\": 3.913887023925781, \"Time in s\": 10.681288 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.64, \"MicroF1\": 0.64, \"MacroF1\": 0.6559607038460422, \"Memory in Mb\": 4.028352737426758, \"Time in s\": 14.131211 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6697819314641744, \"MicroF1\": 0.6697819314641744, \"MacroF1\": 0.6706320385346652, \"Memory in Mb\": 4.144774436950684, \"Time in s\": 17.917192 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6948228882833788, \"MicroF1\": 0.6948228882833788, \"MacroF1\": 0.6897433526546474, \"Memory in Mb\": 4.144762992858887, \"Time in s\": 22.052519 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.711864406779661, \"MicroF1\": 0.711864406779661, \"MacroF1\": 0.706570530482581, \"Memory in Mb\": 4.148934364318848, \"Time in s\": 26.547524 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 0.7145969498910676, \"MacroF1\": 0.7071122267088653, \"Memory in Mb\": 4.148022651672363, \"Time in s\": 31.388583 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7247524752475247, \"MicroF1\": 
0.7247524752475247, \"MacroF1\": 0.7147973207987898, \"Memory in Mb\": 4.147336006164551, \"Time in s\": 36.558334 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7295825771324864, \"MicroF1\": 0.7295825771324864, \"MacroF1\": 0.7210771168277493, \"Memory in Mb\": 4.147068977355957, \"Time in s\": 42.057708 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7336683417085427, \"MicroF1\": 0.7336683417085426, \"MacroF1\": 0.7250288715672424, \"Memory in Mb\": 4.14684009552002, \"Time in s\": 47.89146 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7325038880248833, \"MicroF1\": 0.7325038880248833, \"MacroF1\": 0.725892488365903, \"Memory in Mb\": 4.150084495544434, \"Time in s\": 54.057887 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.737300435413643, \"MicroF1\": 0.737300435413643, \"MacroF1\": 0.730253637873586, \"Memory in Mb\": 4.149851799011231, \"Time in s\": 60.540489 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387755102040816, \"MicroF1\": 0.7387755102040816, \"MacroF1\": 0.7329631379486717, \"Memory in Mb\": 4.149523735046387, \"Time in s\": 67.34570099999999 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7439180537772087, \"MicroF1\": 0.7439180537772088, \"MacroF1\": 0.7387105187530085, \"Memory in Mb\": 4.149043083190918, \"Time in s\": 74.478308 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7460701330108828, \"MicroF1\": 0.7460701330108827, \"MacroF1\": 0.7425025596154724, \"Memory in Mb\": 4.1487531661987305, \"Time in s\": 81.934305 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7514318442153494, \"MicroF1\": 0.7514318442153494, \"MacroF1\": 0.7467163857842192, \"Memory in Mb\": 4.148730278015137, \"Time in s\": 89.700464 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750816104461371, \"MicroF1\": 0.750816104461371, \"MacroF1\": 0.7453933609147307, \"Memory in Mb\": 4.148531913757324, \"Time in s\": 97.776444 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7512953367875648, \"MicroF1\": 0.7512953367875648, \"MacroF1\": 0.7451117895470661, \"Memory in Mb\": 4.148127555847168, \"Time in s\": 106.157006 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7507418397626113, \"MicroF1\": 0.7507418397626113, \"MacroF1\": 0.7449630804815479, \"Memory in Mb\": 4.147826194763184, \"Time in s\": 114.848603 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7511825922421949, \"MicroF1\": 0.7511825922421949, \"MacroF1\": 0.7446315489945474, \"Memory in Mb\": 4.149008750915527, \"Time in s\": 123.845956 
}, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7533998186763372, \"MicroF1\": 0.7533998186763373, \"MacroF1\": 0.7466082689908061, \"Memory in Mb\": 4.149382591247559, \"Time in s\": 133.146638 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7563098346388164, \"MicroF1\": 0.7563098346388164, \"MacroF1\": 0.7491651771194965, \"Memory in Mb\": 4.148917198181152, \"Time in s\": 142.738156 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7589958158995815, \"MicroF1\": 0.7589958158995815, \"MacroF1\": 0.7526420027035882, \"Memory in Mb\": 4.148730278015137, \"Time in s\": 152.636035 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.75825946817083, \"MicroF1\": 0.7582594681708301, \"MacroF1\": 0.7524016178277559, \"Memory in Mb\": 4.148566246032715, \"Time in s\": 162.845279 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7637917637917638, \"MicroF1\": 0.7637917637917638, \"MacroF1\": 0.75666252908711, \"Memory in Mb\": 4.14877986907959, \"Time in s\": 173.368823 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7636909227306826, \"MicroF1\": 0.7636909227306825, \"MacroF1\": 0.7569484848610158, \"Memory in Mb\": 4.148688316345215, \"Time in s\": 184.200835 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650471356055112, \"MicroF1\": 0.7650471356055112, \"MacroF1\": 0.7590436403579585, \"Memory in Mb\": 4.1487226486206055, \"Time in s\": 195.341139 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.767719298245614, \"MicroF1\": 0.767719298245614, \"MacroF1\": 0.7612112896959209, \"Memory in Mb\": 4.148562431335449, \"Time in s\": 206.790743 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7722637661454793, \"MicroF1\": 0.7722637661454793, \"MacroF1\": 0.7640566966433581, \"Memory in Mb\": 4.148623466491699, \"Time in s\": 218.546701 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7732366512854317, \"MicroF1\": 0.7732366512854317, \"MacroF1\": 0.7642341334147652, \"Memory in Mb\": 4.148673057556152, \"Time in s\": 230.6041 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7735124760076776, \"MicroF1\": 0.7735124760076776, \"MacroF1\": 0.7653316001442942, \"Memory in Mb\": 4.148703575134277, \"Time in s\": 242.961148 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7737725295214419, \"MicroF1\": 0.7737725295214419, \"MacroF1\": 0.7647353044337892, \"Memory in Mb\": 4.148566246032715, \"Time in s\": 255.638001 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7734138972809668, \"MicroF1\": 0.7734138972809667, \"MacroF1\": 0.7645730180903106, \"Memory in Mb\": 4.148055076599121, \"Time in s\": 268.628995 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7724867724867724, \"MicroF1\": 0.7724867724867724, \"MacroF1\": 0.7656182355666586, \"Memory in Mb\": 4.148245811462402, \"Time in s\": 281.916269 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7750429307384087, \"MicroF1\": 0.7750429307384087, \"MacroF1\": 0.7677424040514297, \"Memory in Mb\": 4.148360252380371, \"Time in s\": 295.50082 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7763524818739542, \"MicroF1\": 0.7763524818739542, \"MacroF1\": 0.7677176136548695, \"Memory in Mb\": 4.148287773132324, \"Time in s\": 309.399686 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7775965198477434, \"MicroF1\": 0.7775965198477434, \"MacroF1\": 0.7691578918725354, \"Memory in Mb\": 4.147894859313965, \"Time in s\": 323.61456 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7761273209549071, \"MicroF1\": 0.7761273209549071, \"MacroF1\": 0.7681560201617949, \"Memory in Mb\": 4.147856712341309, \"Time in s\": 338.130858 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762817193164163, \"MicroF1\": 0.7762817193164163, \"MacroF1\": 0.7674170460709654, \"Memory in Mb\": 4.147791862487793, \"Time in s\": 352.957512 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7769347496206374, \"MicroF1\": 0.7769347496206374, \"MacroF1\": 0.7672843880004774, \"Memory in Mb\": 4.147627830505371, \"Time in s\": 368.093168 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7790410281759763, \"MicroF1\": 0.7790410281759763, \"MacroF1\": 0.7681802739952505, \"Memory in Mb\": 4.147582054138184, \"Time in s\": 383.545184 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.778153697438376, \"MicroF1\": 0.7781536974383759, \"MacroF1\": 0.7675304391667319, \"Memory in Mb\": 4.147578239440918, \"Time in s\": 399.300197 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7787234042553192, \"MicroF1\": 0.778723404255319, \"MacroF1\": 0.7673415220519754, \"Memory in Mb\": 4.147555351257324, \"Time in s\": 415.3667640000001 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7797316057380842, \"MicroF1\": 0.7797316057380842, \"MacroF1\": 0.7679341969633587, \"Memory in Mb\": 4.147627830505371, \"Time in s\": 431.738527 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7816039873130947, \"MicroF1\": 0.7816039873130947, 
\"MacroF1\": 0.7687944234581563, \"Memory in Mb\": 4.1476240158081055, \"Time in s\": 448.4359490000001 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7785175321793165, \"MicroF1\": 0.7785175321793165, \"MacroF1\": 0.7657018899401807, \"Memory in Mb\": 4.147597312927246, \"Time in s\": 465.434175 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7777294475859069, \"MicroF1\": 0.7777294475859068, \"MacroF1\": 0.7649119672933201, \"Memory in Mb\": 4.14768123626709, \"Time in s\": 482.736348 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5970323052762561, \"Memory in Mb\": 6.54005241394043, \"Time in s\": 11.482596 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5890580890213498, \"Memory in Mb\": 6.540731430053711, \"Time in s\": 32.930503 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6157246605620461, \"MicroF1\": 0.6157246605620461, \"MacroF1\": 0.5802533923244892, \"Memory in Mb\": 6.541685104370117, \"Time in s\": 64.407359 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6107032914989344, \"MicroF1\": 0.6107032914989344, \"MacroF1\": 0.574850135712032, \"Memory in Mb\": 6.54176139831543, \"Time in s\": 105.889955 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.614889183557492, \"MicroF1\": 0.614889183557492, \"MacroF1\": 0.5777842549225518, \"Memory in Mb\": 6.542509078979492, \"Time in s\": 157.317574 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.608997632202052, \"MicroF1\": 0.608997632202052, \"MacroF1\": 0.5733157350789627, \"Memory in Mb\": 6.541296005249023, \"Time in s\": 218.706259 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6057367068055743, \"MicroF1\": 0.6057367068055743, \"MacroF1\": 0.5703382690867538, \"Memory in Mb\": 6.541265487670898, \"Time in s\": 290.118972 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6069610512608027, \"MicroF1\": 0.6069610512608027, \"MacroF1\": 0.5711427916016896, \"Memory in Mb\": 6.541204452514648, \"Time in s\": 371.535386 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6039145532989583, \"MicroF1\": 0.6039145532989583, \"MacroF1\": 0.5678102867297488, \"Memory in Mb\": 6.541570663452148, \"Time in s\": 462.975881 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6034662373330808, \"MicroF1\": 0.6034662373330808, \"MacroF1\": 0.567425153452482, \"Memory in Mb\": 6.541746139526367, \"Time in s\": 564.403412 }, { \"step\": 11616, \"track\": \"Multiclass classification\", 
\"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6005165733964701, \"MicroF1\": 0.6005165733964701, \"MacroF1\": 0.5651283239572901, \"Memory in Mb\": 6.541906356811523, \"Time in s\": 675.845474 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6031883829216321, \"MicroF1\": 0.6031883829216321, \"MacroF1\": 0.5703828979306639, \"Memory in Mb\": 6.542104721069336, \"Time in s\": 797.3461219999999 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6152108982297662, \"MicroF1\": 0.6152108982297662, \"MacroF1\": 0.5959760515786451, \"Memory in Mb\": 6.026429176330566, \"Time in s\": 928.242385 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6060339579246432, \"MicroF1\": 0.6060339579246432, \"MacroF1\": 0.5869142505177357, \"Memory in Mb\": 6.546758651733398, \"Time in s\": 1068.594912 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5713744554580465, \"MicroF1\": 0.5713744554580465, \"MacroF1\": 0.5537658591956377, \"Memory in Mb\": 6.547109603881836, \"Time in s\": 1218.89566 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.545546019532406, \"MicroF1\": 0.545546019532406, \"MacroF1\": 0.5286479939306437, \"Memory in Mb\": 6.431658744812012, \"Time in s\": 1379.1626680000002 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.526767311013314, \"MicroF1\": 0.526767311013314, \"MacroF1\": 0.509587529402725, \"Memory in Mb\": 6.54762077331543, \"Time in s\": 1549.227957 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.517756615983585, \"MicroF1\": 0.517756615983585, \"MacroF1\": 0.4976462434137419, \"Memory in Mb\": 4.743686676025391, \"Time in s\": 1728.135143 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5296815032647162, \"MicroF1\": 0.5296815032647162, \"MacroF1\": 0.5080882715573688, \"Memory in Mb\": 10.447637557983398, \"Time in s\": 1914.608483 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.539750935176855, \"MicroF1\": 0.539750935176855, \"MacroF1\": 0.5184934777423561, \"Memory in Mb\": 11.000249862670898, \"Time in s\": 2110.852221 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5468771138669674, \"MicroF1\": 0.5468771138669674, \"MacroF1\": 0.5259709774382829, \"Memory in Mb\": 10.998456954956056, \"Time in s\": 2316.603266 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5551633593043778, \"MicroF1\": 0.5551633593043778, \"MacroF1\": 0.5340735310276195, \"Memory in Mb\": 12.317106246948242, \"Time in s\": 2531.7014360000003 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5615761518507844, \"MicroF1\": 0.5615761518507844, \"MacroF1\": 
0.5396852076547555, \"Memory in Mb\": 12.966436386108398, \"Time in s\": 2756.048529 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5679280274632048, \"MicroF1\": 0.5679280274632048, \"MacroF1\": 0.5455634192548012, \"Memory in Mb\": 13.622279167175291, \"Time in s\": 2989.801767 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5727868479866661, \"MicroF1\": 0.5727868479866661, \"MacroF1\": 0.5496374434570931, \"Memory in Mb\": 13.72577953338623, \"Time in s\": 3232.991513 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5754143143325442, \"MicroF1\": 0.5754143143325442, \"MacroF1\": 0.5513680135969626, \"Memory in Mb\": 13.724169731140137, \"Time in s\": 3485.670293 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5772859598049875, \"MicroF1\": 0.5772859598049875, \"MacroF1\": 0.5551350356863173, \"Memory in Mb\": 13.7214994430542, \"Time in s\": 3747.946766 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.577772516657084, \"MicroF1\": 0.577772516657084, \"MacroF1\": 0.559086133229251, \"Memory in Mb\": 13.720248222351074, \"Time in s\": 4020.178573 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.578225516768442, \"MicroF1\": 0.578225516768442, \"MacroF1\": 0.5625516131192055, \"Memory in Mb\": 12.85925006866455, \"Time in s\": 4302.520044 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5795637488557088, \"MicroF1\": 0.5795637488557088, \"MacroF1\": 0.5663363640160618, \"Memory in Mb\": 12.858540534973145, \"Time in s\": 4594.920222 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5811211241790133, \"MicroF1\": 0.5811211241790133, \"MacroF1\": 0.5696723582178382, \"Memory in Mb\": 12.857670783996582, \"Time in s\": 4897.257331 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.575804208221124, \"MicroF1\": 0.575804208221124, \"MacroF1\": 0.5647934119551398, \"Memory in Mb\": 13.070902824401855, \"Time in s\": 5209.806865 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5701495107182828, \"MicroF1\": 0.5701495107182828, \"MacroF1\": 0.559068023359177, \"Memory in Mb\": 13.0708646774292, \"Time in s\": 5532.790806 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5657744478177311, \"MicroF1\": 0.5657744478177311, \"MacroF1\": 0.5542573482740074, \"Memory in Mb\": 13.072970390319824, \"Time in s\": 5866.228602 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5611894261208366, \"MicroF1\": 0.5611894261208366, \"MacroF1\": 0.5493152777162592, \"Memory in Mb\": 13.618464469909668, \"Time in s\": 6210.1901960000005 }, { \"step\": 38016, \"track\": \"Multiclass 
classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.558779429172695, \"MicroF1\": 0.558779429172695, \"MacroF1\": 0.5463982360776033, \"Memory in Mb\": 13.620196342468262, \"Time in s\": 6564.708960000001 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5546825010877633, \"MicroF1\": 0.5546825010877633, \"MacroF1\": 0.5426283860139581, \"Memory in Mb\": 14.406596183776855, \"Time in s\": 6929.528370000001 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5542153662122761, \"MicroF1\": 0.5542153662122761, \"MacroF1\": 0.5429626632180721, \"Memory in Mb\": 15.257904052734377, \"Time in s\": 7304.280479000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5541364155112547, \"MicroF1\": 0.5541364155112547, \"MacroF1\": 0.5435420562964656, \"Memory in Mb\": 15.358447074890137, \"Time in s\": 7688.491709000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5542981604678141, \"MicroF1\": 0.5542981604678141, \"MacroF1\": 0.5443914000180358, \"Memory in Mb\": 15.357035636901855, \"Time in s\": 8082.149982000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.554151749624668, \"MicroF1\": 0.554151749624668, \"MacroF1\": 0.5448486588729108, \"Memory in Mb\": 13.518179893493652, \"Time in s\": 8485.318247000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5536290049829767, \"MicroF1\": 0.5536290049829767, \"MacroF1\": 0.5448029815059025, \"Memory in Mb\": 13.742095947265623, \"Time in s\": 8897.925593000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5541436342414165, \"MicroF1\": 0.5541436342414165, \"MacroF1\": 0.5454957405719211, \"Memory in Mb\": 14.286569595336914, \"Time in s\": 9319.685836000002 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5553020683124207, \"MicroF1\": 0.5553020683124207, \"MacroF1\": 0.546961663735647, \"Memory in Mb\": 15.266200065612791, \"Time in s\": 9750.326660000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5579662871693428, \"MicroF1\": 0.5579662871693428, \"MacroF1\": 0.5498636684303295, \"Memory in Mb\": 14.323851585388184, \"Time in s\": 10190.236686000002 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5627586206896552, \"MicroF1\": 0.5627586206896552, \"MacroF1\": 0.5545030394801858, \"Memory in Mb\": 14.950955390930176, \"Time in s\": 10639.519510000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5677701436602124, \"MicroF1\": 0.5677701436602124, \"MacroF1\": 0.5591808574875289, \"Memory in Mb\": 15.350643157958984, \"Time in s\": 11098.085262000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": 
\"Insects\", \"Accuracy\": 0.5730463432438297, \"MicroF1\": 0.5730463432438297, \"MacroF1\": 0.5639878919164368, \"Memory in Mb\": 16.015583038330078, \"Time in s\": 11565.627356000005 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5791894555785324, \"MicroF1\": 0.5791894555785324, \"MacroF1\": 0.5695807960578061, \"Memory in Mb\": 16.33325481414795, \"Time in s\": 12041.828478000005 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5794238527244834, \"MicroF1\": 0.5794238527244834, \"MacroF1\": 0.5701364277094956, \"Memory in Mb\": 15.444610595703123, \"Time in s\": 12525.893185000004 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.2430334091186523, \"Time in s\": 1.187339 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411042944785276, \"MicroF1\": 0.9411042944785276, \"MacroF1\": 0.7377235942917068, \"Memory in Mb\": 3.19038200378418, \"Time in s\": 4.698555 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8879803761242846, \"MicroF1\": 0.8879803761242846, \"MacroF1\": 0.873420796574987, \"Memory in Mb\": 4.134529113769531, \"Time in s\": 10.466629 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8988350705088902, \"MicroF1\": 0.8988350705088902, \"MacroF1\": 0.8792834531664682, \"Memory in Mb\": 5.086630821228027, \"Time in s\": 18.824542 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8950465914664051, \"MicroF1\": 0.8950465914664051, \"MacroF1\": 0.8828407845486113, \"Memory in Mb\": 6.147420883178711, \"Time in s\": 30.316821 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.856559051900286, \"MicroF1\": 0.856559051900286, \"MacroF1\": 0.8543242501248514, \"Memory in Mb\": 6.513773918151856, \"Time in s\": 45.490026 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8640980735551663, \"MicroF1\": 0.8640980735551663, \"MacroF1\": 0.8525227127090282, \"Memory in Mb\": 7.461577415466309, \"Time in s\": 64.600537 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.855654305853509, \"MicroF1\": 0.855654305853509, \"MacroF1\": 0.8307453339686874, \"Memory in Mb\": 8.407819747924805, \"Time in s\": 88.28106700000001 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8469081994007083, \"MicroF1\": 0.8469081994007084, \"MacroF1\": 0.8445950801753395, \"Memory in Mb\": 9.06855297088623, \"Time in s\": 117.068452 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.839911743074283, \"MicroF1\": 0.839911743074283, \"MacroF1\": 0.8273018519986841, \"Memory in Mb\": 
10.241823196411133, \"Time in s\": 151.440392 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8279474036104302, \"MicroF1\": 0.8279474036104302, \"MacroF1\": 0.8381848634946416, \"Memory in Mb\": 11.187081336975098, \"Time in s\": 191.919368 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8294177732379979, \"MicroF1\": 0.8294177732379979, \"MacroF1\": 0.8370944525285466, \"Memory in Mb\": 8.72095775604248, \"Time in s\": 237.683057 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.832736187063926, \"MicroF1\": 0.832736187063926, \"MacroF1\": 0.8304665020850452, \"Memory in Mb\": 9.573864936828612, \"Time in s\": 288.722663 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8254246191560147, \"MicroF1\": 0.8254246191560147, \"MacroF1\": 0.8318293629616008, \"Memory in Mb\": 10.363824844360352, \"Time in s\": 345.486273 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8238274227815002, \"MicroF1\": 0.8238274227815002, \"MacroF1\": 0.8134447828524414, \"Memory in Mb\": 11.40614128112793, \"Time in s\": 408.45138 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8043511567335683, \"MicroF1\": 0.8043511567335683, \"MacroF1\": 0.8054460603633147, \"Memory in Mb\": 12.15697956085205, \"Time in s\": 478.205713 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8005767844268205, \"MicroF1\": 0.8005767844268206, \"MacroF1\": 0.8067791986535922, \"Memory in Mb\": 11.58870792388916, \"Time in s\": 555.142755 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081165736075173, \"MicroF1\": 0.8081165736075173, \"MacroF1\": 0.8106639227074198, \"Memory in Mb\": 12.00939655303955, \"Time in s\": 638.5654069999999 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8097019739388466, \"MicroF1\": 0.8097019739388466, \"MacroF1\": 0.8127585051729247, \"Memory in Mb\": 13.156713485717772, \"Time in s\": 728.9317779999999 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8134575315602403, \"MicroF1\": 0.8134575315602401, \"MacroF1\": 0.8148392057777913, \"Memory in Mb\": 13.88283348083496, \"Time in s\": 826.9412199999999 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.817672464106455, \"MicroF1\": 0.817672464106455, \"MacroF1\": 0.8208026583224199, \"Memory in Mb\": 15.191060066223145, \"Time in s\": 933.116233 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8213927576601672, \"MicroF1\": 0.8213927576601672, \"MacroF1\": 0.8243856825821874, \"Memory in Mb\": 16.25563907623291, \"Time in s\": 1048.180156 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": 
\"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8219119684535863, \"MicroF1\": 0.8219119684535864, \"MacroF1\": 0.8243183344026902, \"Memory in Mb\": 17.071918487548828, \"Time in s\": 1172.5966959999998 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8224900418751915, \"MicroF1\": 0.8224900418751915, \"MacroF1\": 0.8248306232761192, \"Memory in Mb\": 18.233381271362305, \"Time in s\": 1306.7477949999998 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.819197960584371, \"MicroF1\": 0.819197960584371, \"MacroF1\": 0.8170259665463304, \"Memory in Mb\": 19.36789894104004, \"Time in s\": 1451.4292829999995 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.805788630149901, \"MicroF1\": 0.8057886301499011, \"MacroF1\": 0.8022367569175978, \"Memory in Mb\": 20.506345748901367, \"Time in s\": 1607.1296199999997 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.802088061733999, \"MicroF1\": 0.8020880617339992, \"MacroF1\": 0.8038074645550285, \"Memory in Mb\": 19.16464138031006, \"Time in s\": 1773.9352609999996 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8018909218243894, \"MicroF1\": 0.8018909218243894, \"MacroF1\": 0.8005729972530424, \"Memory in Mb\": 19.133995056152344, \"Time in s\": 1951.246828 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8002704758684811, \"MicroF1\": 0.800270475868481, \"MacroF1\": 0.8004166941842216, \"Memory in Mb\": 20.079543113708496, \"Time in s\": 2139.48669 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8035787237519405, \"MicroF1\": 0.8035787237519405, \"MacroF1\": 0.8060123607032721, \"Memory in Mb\": 17.827110290527344, \"Time in s\": 2338.9088759999995 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8088084130623864, \"MicroF1\": 0.8088084130623864, \"MacroF1\": 0.8108606005777994, \"Memory in Mb\": 15.629319190979004, \"Time in s\": 2547.8446989999998 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8079662964381463, \"MicroF1\": 0.8079662964381463, \"MacroF1\": 0.8077709771623751, \"Memory in Mb\": 16.39698314666748, \"Time in s\": 2766.5130659999995 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8068038327267325, \"MicroF1\": 0.8068038327267325, \"MacroF1\": 0.807905549135964, \"Memory in Mb\": 17.24907112121582, \"Time in s\": 2995.4784789999994 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.810107418354841, \"MicroF1\": 0.810107418354841, \"MacroF1\": 0.8115061911206084, \"Memory in Mb\": 18.023069381713867, \"Time in s\": 3234.9291429999994 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.813432313187198, \"MicroF1\": 0.813432313187198, \"MacroF1\": 0.814709519180665, \"Memory in Mb\": 19.25601577758789, \"Time in s\": 3485.414837 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8169810036086335, \"MicroF1\": 0.8169810036086335, \"MacroF1\": 0.8183348126971706, \"Memory in Mb\": 19.79203414916992, \"Time in s\": 3747.476331 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8210665783371978, \"MicroF1\": 0.8210665783371978, \"MacroF1\": 0.8224533109934684, \"Memory in Mb\": 20.76410961151123, \"Time in s\": 4021.7266099999993 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8227439850351544, \"MicroF1\": 0.8227439850351544, \"MacroF1\": 0.8236860076332361, \"Memory in Mb\": 21.70703220367432, \"Time in s\": 4308.908110999999 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8177361573754006, \"MicroF1\": 0.8177361573754006, \"MacroF1\": 0.8170714187961161, \"Memory in Mb\": 22.97433376312256, \"Time in s\": 4609.388637999999 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8135915190881794, \"MicroF1\": 0.8135915190881794, \"MacroF1\": 0.8136474897036394, \"Memory in Mb\": 23.700613975524902, \"Time in s\": 4923.859395999999 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8133556525378132, \"MicroF1\": 0.8133556525378132, \"MacroF1\": 0.8142218072403056, \"Memory in Mb\": 24.764389038085938, \"Time in s\": 5252.522612999999 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8092792529909542, \"MicroF1\": 0.8092792529909542, \"MacroF1\": 0.8090411402278314, \"Memory in Mb\": 26.198601722717285, \"Time in s\": 5595.909021 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8062475061278003, \"MicroF1\": 0.8062475061278003, \"MacroF1\": 0.8065701979489333, \"Memory in Mb\": 25.60452175140381, \"Time in s\": 5954.543495 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8078101498523759, \"MicroF1\": 0.8078101498523759, \"MacroF1\": 0.8084559739072698, \"Memory in Mb\": 26.552149772644043, \"Time in s\": 6328.536349 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8103927229151915, \"MicroF1\": 0.8103927229151915, \"MacroF1\": 0.8111272646261444, \"Memory in Mb\": 27.50138759613037, \"Time in s\": 6718.347567 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.813022859274258, \"MicroF1\": 0.813022859274258, \"MacroF1\": 0.8138485204649677, \"Memory in Mb\": 28.353083610534668, \"Time in s\": 7124.657721 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8090743155149935, \"MicroF1\": 0.8090743155149935, \"MacroF1\": 
0.8093701596568051, \"Memory in Mb\": 29.32584285736084, \"Time in s\": 7547.838048 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8110606137976817, \"MicroF1\": 0.8110606137976817, \"MacroF1\": 0.8116953495842238, \"Memory in Mb\": 30.33370780944824, \"Time in s\": 7988.342591 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081136511430144, \"MicroF1\": 0.8081136511430144, \"MacroF1\": 0.8084718836746521, \"Memory in Mb\": 31.28043556213379, \"Time in s\": 8446.728501 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8058238148928869, \"MicroF1\": 0.805823814892887, \"MacroF1\": 0.8062504565207905, \"Memory in Mb\": 32.1812219619751, \"Time in s\": 8923.60629 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1111111111111111, \"MicroF1\": 0.1111111111111111, \"MacroF1\": 0.0815018315018315, \"Memory in Mb\": 3.44619369506836, \"Time in s\": 0.804099 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.2307692307692307, \"MicroF1\": 0.2307692307692307, \"MacroF1\": 0.2226391771283412, \"Memory in Mb\": 4.129319190979004, \"Time in s\": 2.027411 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4233576642335766, \"MicroF1\": 0.4233576642335766, \"MacroF1\": 0.4463537718619156, \"Memory in Mb\": 4.129193305969238, \"Time in s\": 3.599985 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5355191256830601, \"MicroF1\": 0.5355191256830601, \"MacroF1\": 0.5617062146473911, \"Memory in Mb\": 4.129368782043457, \"Time in s\": 5.452412 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5938864628820961, \"MicroF1\": 0.5938864628820961, \"MacroF1\": 0.6236530662596055, \"Memory in Mb\": 4.12935733795166, \"Time in s\": 7.651963 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6290909090909091, \"MicroF1\": 0.6290909090909091, \"MacroF1\": 0.6558170665459355, \"Memory in Mb\": 4.129300117492676, \"Time in s\": 10.207424 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.660436137071651, \"MicroF1\": 0.660436137071651, \"MacroF1\": 0.6785747202615152, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 13.08877 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6920980926430518, \"MicroF1\": 0.6920980926430518, \"MacroF1\": 0.7041680355881775, \"Memory in Mb\": 4.12868595123291, \"Time in s\": 16.291428 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7167070217917676, \"MicroF1\": 0.7167070217917676, \"MacroF1\": 0.7259075149442815, \"Memory in Mb\": 4.128170967102051, \"Time in s\": 19.832325 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7254901960784313, \"MicroF1\": 0.7254901960784313, \"MacroF1\": 0.732501171084948, \"Memory in Mb\": 4.128491401672363, \"Time in s\": 23.76135 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7386138613861386, \"MicroF1\": 0.7386138613861386, \"MacroF1\": 0.7428621938273078, \"Memory in Mb\": 4.128743171691895, \"Time in s\": 28.024553 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7422867513611615, \"MicroF1\": 0.7422867513611615, \"MacroF1\": 0.7453719085253248, \"Memory in Mb\": 4.128548622131348, \"Time in s\": 32.646215 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7487437185929648, \"MicroF1\": 0.7487437185929648, \"MacroF1\": 0.7504522188790484, \"Memory in Mb\": 4.128659248352051, \"Time in s\": 37.596323 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7465007776049767, \"MicroF1\": 0.7465007776049767, \"MacroF1\": 0.7482323503576439, \"Memory in Mb\": 4.128731727600098, \"Time in s\": 42.92857 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7489114658925979, \"MicroF1\": 0.748911465892598, \"MacroF1\": 0.7488472102580619, \"Memory in Mb\": 4.128785133361816, \"Time in s\": 48.576103 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7523809523809524, \"MicroF1\": 0.7523809523809524, \"MacroF1\": 0.75182837230991, \"Memory in Mb\": 4.1286211013793945, \"Time in s\": 54.551686 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7541613316261203, \"MicroF1\": 0.7541613316261204, \"MacroF1\": 0.7531089046321313, \"Memory in Mb\": 4.128552436828613, \"Time in s\": 60.838379 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7557436517533253, \"MicroF1\": 0.7557436517533253, \"MacroF1\": 0.7552013614952863, \"Memory in Mb\": 4.128499031066895, \"Time in s\": 67.464563 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7617411225658648, \"MicroF1\": 0.7617411225658649, \"MacroF1\": 0.7601066395856337, \"Memory in Mb\": 4.128571510314941, \"Time in s\": 74.378096 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.763873775843308, \"MicroF1\": 0.763873775843308, \"MacroF1\": 0.7623480483274478, \"Memory in Mb\": 4.1285905838012695, \"Time in s\": 81.591089 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7678756476683938, \"MicroF1\": 0.7678756476683938, \"MacroF1\": 0.7646598072570266, \"Memory in Mb\": 4.128613471984863, \"Time in s\": 89.10581599999999 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7705242334322453, \"MicroF1\": 0.7705242334322453, \"MacroF1\": 0.7668271197983112, \"Memory in Mb\": 4.128720283508301, \"Time in s\": 
96.930863 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7757805108798487, \"MicroF1\": 0.7757805108798487, \"MacroF1\": 0.7714920336037776, \"Memory in Mb\": 4.128579139709473, \"Time in s\": 105.051956 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7760652765185857, \"MicroF1\": 0.7760652765185856, \"MacroF1\": 0.7719206139767609, \"Memory in Mb\": 4.128727912902832, \"Time in s\": 113.491292 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7789382071366405, \"MicroF1\": 0.7789382071366405, \"MacroF1\": 0.7750313949659529, \"Memory in Mb\": 4.128632545471191, \"Time in s\": 122.217257 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7849372384937239, \"MicroF1\": 0.7849372384937239, \"MacroF1\": 0.782000389047251, \"Memory in Mb\": 4.128678321838379, \"Time in s\": 131.239072 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7856567284448026, \"MicroF1\": 0.7856567284448026, \"MacroF1\": 0.7827470902102025, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 140.604336 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7894327894327894, \"MicroF1\": 0.7894327894327894, \"MacroF1\": 0.785982924599392, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 150.25346299999998 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7906976744186046, \"MicroF1\": 0.7906976744186046, \"MacroF1\": 0.7876424482584368, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 160.232262 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7933284989122552, \"MicroF1\": 0.7933284989122552, \"MacroF1\": 0.7906471924204203, \"Memory in Mb\": 4.128582954406738, \"Time in s\": 170.496442 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7978947368421052, \"MicroF1\": 0.7978947368421052, \"MacroF1\": 0.7945020166797493, \"Memory in Mb\": 4.128670692443848, \"Time in s\": 181.024488 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8028552005438477, \"MicroF1\": 0.8028552005438477, \"MacroF1\": 0.7982243751921435, \"Memory in Mb\": 4.128663063049316, \"Time in s\": 191.820653 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8035596572181938, \"MicroF1\": 0.8035596572181938, \"MacroF1\": 0.7981876534181911, \"Memory in Mb\": 4.1286821365356445, \"Time in s\": 202.942587 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8035828534868842, \"MicroF1\": 0.8035828534868842, \"MacroF1\": 0.798634974540431, \"Memory in Mb\": 4.128708839416504, \"Time in s\": 214.370001 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 
0.8048477315102548, \"MicroF1\": 0.8048477315102549, \"MacroF1\": 0.7997380784882049, \"Memory in Mb\": 4.128571510314941, \"Time in s\": 226.09596 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8066465256797583, \"MicroF1\": 0.8066465256797583, \"MacroF1\": 0.80161945439383, \"Memory in Mb\": 4.128567695617676, \"Time in s\": 238.115396 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8059964726631393, \"MicroF1\": 0.8059964726631393, \"MacroF1\": 0.8024858564723996, \"Memory in Mb\": 4.128705024719238, \"Time in s\": 250.457722 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8070978820835718, \"MicroF1\": 0.8070978820835718, \"MacroF1\": 0.8029124203507954, \"Memory in Mb\": 4.128613471984863, \"Time in s\": 263.107237 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8081427774679308, \"MicroF1\": 0.8081427774679307, \"MacroF1\": 0.8029834045630978, \"Memory in Mb\": 4.12865161895752, \"Time in s\": 276.028168 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8069603045133225, \"MicroF1\": 0.8069603045133223, \"MacroF1\": 0.8019276227162541, \"Memory in Mb\": 4.128785133361816, \"Time in s\": 289.253241 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8053050397877984, \"MicroF1\": 0.8053050397877984, \"MacroF1\": 0.8006727596367826, \"Memory in Mb\": 4.1285905838012695, \"Time in s\": 302.793979 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8047643707923355, \"MicroF1\": 0.8047643707923355, \"MacroF1\": 0.7995493059800364, \"Memory in Mb\": 4.128586769104004, \"Time in s\": 316.637791 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8057663125948407, \"MicroF1\": 0.8057663125948407, \"MacroF1\": 0.8003960406612561, \"Memory in Mb\": 4.12862491607666, \"Time in s\": 330.791782 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8072170044488384, \"MicroF1\": 0.8072170044488384, \"MacroF1\": 0.8005625942078284, \"Memory in Mb\": 4.1286211013793945, \"Time in s\": 345.23827800000004 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8066698888351861, \"MicroF1\": 0.8066698888351861, \"MacroF1\": 0.8002110568368, \"Memory in Mb\": 4.128506660461426, \"Time in s\": 360.0031 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.807565011820331, \"MicroF1\": 0.807565011820331, \"MacroF1\": 0.8005131307885663, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 375.059377 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8079592781119852, \"MicroF1\": 0.8079592781119852, \"MacroF1\": 0.8006755955605838, \"Memory in Mb\": 4.128510475158691, \"Time in s\": 390.39582400000006 }, 
{ \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8087902129587675, \"MicroF1\": 0.8087902129587675, \"MacroF1\": 0.8009921695193861, \"Memory in Mb\": 4.128510475158691, \"Time in s\": 405.9976 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8060363959165557, \"MicroF1\": 0.8060363959165557, \"MacroF1\": 0.7987732120640717, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 421.95304 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8051326663766856, \"MicroF1\": 0.8051326663766856, \"MacroF1\": 0.7980778928096751, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 438.2190000000001 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5992691812827112, \"Memory in Mb\": 6.522543907165527, \"Time in s\": 11.237994 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6110847939365229, \"MicroF1\": 0.6110847939365229, \"MacroF1\": 0.5773210074897359, \"Memory in Mb\": 6.522406578063965, \"Time in s\": 32.531305 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043574360593622, \"MicroF1\": 0.6043574360593622, \"MacroF1\": 0.5704368753709179, \"Memory in Mb\": 6.521971702575684, \"Time in s\": 63.87417000000001 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6014681506038362, \"MicroF1\": 0.6014681506038362, \"MacroF1\": 0.5676969561642587, \"Memory in Mb\": 6.521697044372559, \"Time in s\": 105.215764 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6057965523773442, \"MicroF1\": 0.6057965523773442, \"MacroF1\": 0.5710016183775801, \"Memory in Mb\": 6.521697044372559, \"Time in s\": 156.441097 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5966850828729282, \"MicroF1\": 0.5966850828729282, \"MacroF1\": 0.5635903588556204, \"Memory in Mb\": 6.521857261657715, \"Time in s\": 217.680264 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5957245298335814, \"MicroF1\": 0.5957245298335814, \"MacroF1\": 0.5625002603439991, \"Memory in Mb\": 6.52231502532959, \"Time in s\": 288.908384 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5982005445720374, \"MicroF1\": 0.5982005445720374, \"MacroF1\": 0.5646892369665863, \"Memory in Mb\": 6.522658348083496, \"Time in s\": 370.082682 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.596337998526781, \"MicroF1\": 0.596337998526781, \"MacroF1\": 0.5627085514562804, \"Memory in Mb\": 6.523001670837402, \"Time in s\": 461.259323 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5965527038545316, \"MicroF1\": 0.5965527038545316, \"MacroF1\": 
0.5631320282838163, \"Memory in Mb\": 6.523184776306152, \"Time in s\": 562.3727710000001 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5953508394317693, \"MicroF1\": 0.5953508394317693, \"MacroF1\": 0.562671447170627, \"Memory in Mb\": 6.523184776306152, \"Time in s\": 673.482974 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5979796385447084, \"MicroF1\": 0.5979796385447084, \"MacroF1\": 0.5680559575776837, \"Memory in Mb\": 6.522841453552246, \"Time in s\": 794.598712 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.610767101333139, \"MicroF1\": 0.610767101333139, \"MacroF1\": 0.5941277335666079, \"Memory in Mb\": 6.522337913513184, \"Time in s\": 925.382156 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6019752418318338, \"MicroF1\": 0.6019752418318338, \"MacroF1\": 0.5851264744797858, \"Memory in Mb\": 6.522246360778809, \"Time in s\": 1066.407983 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5705536965717533, \"MicroF1\": 0.5705536965717533, \"MacroF1\": 0.5545059657048704, \"Memory in Mb\": 6.522475242614746, \"Time in s\": 1217.74448 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.548091151228174, \"MicroF1\": 0.548091151228174, \"MacroF1\": 0.5320735507355622, \"Memory in Mb\": 6.522887229919434, \"Time in s\": 1378.917078 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5307225224221492, \"MicroF1\": 0.5307225224221492, \"MacroF1\": 0.5138536287616571, \"Memory in Mb\": 6.523138999938965, \"Time in s\": 1549.794627 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5182827379386542, \"MicroF1\": 0.5182827379386542, \"MacroF1\": 0.4990809738484312, \"Memory in Mb\": 6.523367881774902, \"Time in s\": 1730.586936 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5182176145142801, \"MicroF1\": 0.5182176145142801, \"MacroF1\": 0.497867701567998, \"Memory in Mb\": 8.711265563964844, \"Time in s\": 1921.389763 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5272503432927695, \"MicroF1\": 0.5272503432927695, \"MacroF1\": 0.5067114684709674, \"Memory in Mb\": 15.55071258544922, \"Time in s\": 2121.77156 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.533032694475761, \"MicroF1\": 0.533032694475761, \"MacroF1\": 0.5127471323280748, \"Memory in Mb\": 16.9340763092041, \"Time in s\": 2331.759567 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5410442942619775, \"MicroF1\": 0.5410442942619775, \"MacroF1\": 0.5207771198745245, \"Memory in Mb\": 17.15799903869629, \"Time in s\": 2551.0181620000003 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", 
\"Accuracy\": 0.5459710956478775, \"MicroF1\": 0.5459710956478775, \"MacroF1\": 0.5251711652768186, \"Memory in Mb\": 17.155046463012695, \"Time in s\": 2778.8449820000005 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5532099593576135, \"MicroF1\": 0.5532099593576135, \"MacroF1\": 0.5314216535856217, \"Memory in Mb\": 17.15360450744629, \"Time in s\": 3015.0560080000005 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5607788173794462, \"MicroF1\": 0.5607788173794462, \"MacroF1\": 0.5375130024626694, \"Memory in Mb\": 17.265982627868652, \"Time in s\": 3259.2678140000003 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5667091604443635, \"MicroF1\": 0.5667091604443635, \"MacroF1\": 0.5418496825562071, \"Memory in Mb\": 17.378803253173828, \"Time in s\": 3511.9238520000004 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5692890463329943, \"MicroF1\": 0.5692890463329943, \"MacroF1\": 0.5455529487931667, \"Memory in Mb\": 17.379924774169922, \"Time in s\": 3773.544178 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5688436432509216, \"MicroF1\": 0.5688436432509216, \"MacroF1\": 0.5481992899375988, \"Memory in Mb\": 17.380359649658203, \"Time in s\": 4045.250953 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5687228553701467, \"MicroF1\": 0.5687228553701467, \"MacroF1\": 0.5505043481720591, \"Memory in Mb\": 17.380290985107422, \"Time in s\": 4327.317909 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5691467533697402, \"MicroF1\": 0.5691467533697402, \"MacroF1\": 0.5529220328647554, \"Memory in Mb\": 17.37978744506836, \"Time in s\": 4619.673811000001 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5703986558729189, \"MicroF1\": 0.5703986558729189, \"MacroF1\": 0.5556828084411201, \"Memory in Mb\": 17.37948989868164, \"Time in s\": 4922.186926 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5650025154626972, \"MicroF1\": 0.5650025154626972, \"MacroF1\": 0.5507695387439543, \"Memory in Mb\": 17.604686737060547, \"Time in s\": 5235.061695 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5587568513788849, \"MicroF1\": 0.5587568513788849, \"MacroF1\": 0.5445559443415654, \"Memory in Mb\": 18.05030918121338, \"Time in s\": 5558.17951 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.554215525165028, \"MicroF1\": 0.554215525165028, \"MacroF1\": 0.5396701176441828, \"Memory in Mb\": 18.150463104248047, \"Time in s\": 5891.808229 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5490408290267594, \"MicroF1\": 0.5490408290267594, \"MacroF1\": 0.5342475234810463, \"Memory in Mb\": 19.505443572998047, \"Time in s\": 
6235.988455000001 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5476522425358411, \"MicroF1\": 0.5476522425358411, \"MacroF1\": 0.5324130893403342, \"Memory in Mb\": 21.324423789978027, \"Time in s\": 6590.662011 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5427810908346344, \"MicroF1\": 0.5427810908346344, \"MacroF1\": 0.5280992603544316, \"Memory in Mb\": 22.076537132263184, \"Time in s\": 6955.865912 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5417300072270541, \"MicroF1\": 0.5417300072270541, \"MacroF1\": 0.5282649533846114, \"Memory in Mb\": 22.738471031188965, \"Time in s\": 7330.794870000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5417283830706845, \"MicroF1\": 0.5417283830706845, \"MacroF1\": 0.5295529576867488, \"Memory in Mb\": 23.16494464874268, \"Time in s\": 7714.553666000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5419635881531286, \"MicroF1\": 0.5419635881531286, \"MacroF1\": 0.5308394560628455, \"Memory in Mb\": 23.606464385986328, \"Time in s\": 8107.062243 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5438734264926666, \"MicroF1\": 0.5438734264926666, \"MacroF1\": 0.5334569208328087, \"Memory in Mb\": 23.706249237060547, \"Time in s\": 8507.305821 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5453315596040675, \"MicroF1\": 0.5453315596040675, \"MacroF1\": 0.5354029875943346, \"Memory in Mb\": 24.299342155456543, \"Time in s\": 8915.303832 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.547140308762966, \"MicroF1\": 0.547140308762966, \"MacroF1\": 0.5374156745075451, \"Memory in Mb\": 24.818781852722168, \"Time in s\": 9330.915411 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5492327228116997, \"MicroF1\": 0.5492327228116997, \"MacroF1\": 0.5397202270950943, \"Memory in Mb\": 25.030532836914062, \"Time in s\": 9754.309095 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5481596834950231, \"MicroF1\": 0.5481596834950231, \"MacroF1\": 0.5387960204161004, \"Memory in Mb\": 25.669864654541016, \"Time in s\": 10186.398449 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5455275347400926, \"MicroF1\": 0.5455275347400926, \"MacroF1\": 0.5361266295596548, \"Memory in Mb\": 25.668834686279297, \"Time in s\": 10627.539344 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5440752755334367, \"MicroF1\": 0.5440752755334367, \"MacroF1\": 0.534604738581891, \"Memory in Mb\": 26.10423469543457, \"Time in s\": 11077.630938 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5484443742971571, \"MicroF1\": 
0.5484443742971571, \"MacroF1\": 0.538570218508335, \"Memory in Mb\": 27.224528312683105, \"Time in s\": 11536.66667 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5534081904798717, \"MicroF1\": 0.5534081904798717, \"MacroF1\": 0.5429607704191827, \"Memory in Mb\": 28.06478881835937, \"Time in s\": 12004.483564 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5540824636830243, \"MicroF1\": 0.5540824636830243, \"MacroF1\": 0.543927330204892, \"Memory in Mb\": 28.290183067321777, \"Time in s\": 12481.284297 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9877149877149876, \"MicroF1\": 0.9877149877149876, \"MacroF1\": 0.7696139476961394, \"Memory in Mb\": 2.1705713272094727, \"Time in s\": 1.169245 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.988957055214724, \"MicroF1\": 0.988957055214724, \"MacroF1\": 0.9592655637573824, \"Memory in Mb\": 2.994051933288574, \"Time in s\": 4.333642 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852820932134096, \"MicroF1\": 0.9852820932134096, \"MacroF1\": 0.9482751483180804, \"Memory in Mb\": 4.729727745056152, \"Time in s\": 12.346709 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9822194972409564, \"MicroF1\": 0.9822194972409564, \"MacroF1\": 0.9509896151723368, \"Memory in Mb\": 5.999781608581543, \"Time in s\": 25.091939 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725355566454144, \"MicroF1\": 0.9725355566454144, \"MacroF1\": 0.928775026512405, \"Memory in Mb\": 7.915155410766602, \"Time in s\": 43.495481 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9550469963220268, \"MicroF1\": 0.9550469963220268, \"MacroF1\": 0.9404929408648164, \"Memory in Mb\": 9.98500156402588, \"Time in s\": 68.010219 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9516637478108582, \"MicroF1\": 0.9516637478108582, \"MacroF1\": 0.9265706247083844, \"Memory in Mb\": 13.281692504882812, \"Time in s\": 95.835033 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9457554397793442, \"MicroF1\": 0.9457554397793442, \"MacroF1\": 0.9273434636455652, \"Memory in Mb\": 16.35391616821289, \"Time in s\": 127.205902 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9417052574230454, \"MicroF1\": 0.9417052574230454, \"MacroF1\": 0.925978466853896, \"Memory in Mb\": 18.91156578063965, \"Time in s\": 163.340984 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9355234126011276, \"MicroF1\": 0.9355234126011276, \"MacroF1\": 0.9181372267911062, \"Memory in Mb\": 22.338744163513184, \"Time in s\": 205.235807 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.931580120347671, \"MicroF1\": 0.931580120347671, \"MacroF1\": 0.9327276252021246, \"Memory in Mb\": 25.31070709228516, \"Time in s\": 252.959286 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9303370786516854, \"MicroF1\": 0.9303370786516854, \"MacroF1\": 0.9257176086775136, \"Memory in Mb\": 28.274658203125, \"Time in s\": 306.688529 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.925325287573072, \"MicroF1\": 0.925325287573072, \"MacroF1\": 0.9165251784293146, \"Memory in Mb\": 32.214202880859375, \"Time in s\": 367.4329340000001 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9226054981614428, \"MicroF1\": 0.9226054981614428, \"MacroF1\": 0.9209111845314156, \"Memory in Mb\": 34.49547863006592, \"Time in s\": 436.878813 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9181238764504004, \"MicroF1\": 0.9181238764504004, \"MacroF1\": 0.9091206319047904, \"Memory in Mb\": 38.86995029449463, \"Time in s\": 513.269664 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9129768653286348, \"MicroF1\": 0.9129768653286348, \"MacroF1\": 0.9114007831703168, \"Memory in Mb\": 42.32428169250488, \"Time in s\": 602.212119 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9114635904830568, \"MicroF1\": 0.9114635904830568, \"MacroF1\": 0.9134311944430068, \"Memory in Mb\": 44.19167232513428, \"Time in s\": 700.8190040000001 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9116165055154568, \"MicroF1\": 0.9116165055154568, \"MacroF1\": 0.9097332482243848, \"Memory in Mb\": 44.84274196624756, \"Time in s\": 807.0835780000001 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9112372597084248, \"MicroF1\": 0.9112372597084248, \"MacroF1\": 0.9111242959524108, \"Memory in Mb\": 46.84857273101807, \"Time in s\": 921.356409 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9094251746537566, \"MicroF1\": 0.9094251746537566, \"MacroF1\": 0.9076128910354778, \"Memory in Mb\": 51.16739654541016, \"Time in s\": 1044.662094 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9066184195167504, \"MicroF1\": 0.9066184195167504, \"MacroF1\": 0.9066450469749988, \"Memory in Mb\": 55.86186981201172, \"Time in s\": 1177.672199 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9056267409470752, \"MicroF1\": 0.9056267409470752, \"MacroF1\": 0.906335380756654, \"Memory in Mb\": 58.41574668884277, \"Time in s\": 1322.047745 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9030160929340296, \"MicroF1\": 0.9030160929340296, \"MacroF1\": 0.9022077684947396, \"Memory in Mb\": 62.26175117492676, \"Time in s\": 1477.298762 }, { 
\"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8986824634868757, \"MicroF1\": 0.8986824634868757, \"MacroF1\": 0.8984090939041232, \"Memory in Mb\": 65.20822143554688, \"Time in s\": 1646.0177680000002 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8947936072163938, \"MicroF1\": 0.8947936072163937, \"MacroF1\": 0.8926613887647973, \"Memory in Mb\": 69.96524620056152, \"Time in s\": 1829.707382 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8881870462901857, \"MicroF1\": 0.8881870462901857, \"MacroF1\": 0.8865702773222168, \"Memory in Mb\": 73.60436820983887, \"Time in s\": 2032.787653 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8849750340444847, \"MicroF1\": 0.8849750340444847, \"MacroF1\": 0.8859866133359942, \"Memory in Mb\": 77.43610191345215, \"Time in s\": 2252.324419 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8823426420379935, \"MicroF1\": 0.8823426420379935, \"MacroF1\": 0.8811651142625456, \"Memory in Mb\": 81.9732666015625, \"Time in s\": 2486.561888 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8789620488547037, \"MicroF1\": 0.8789620488547037, \"MacroF1\": 0.8783725809837211, \"Memory in Mb\": 87.50129985809326, \"Time in s\": 2738.033946 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8803823841817142, \"MicroF1\": 0.8803823841817142, \"MacroF1\": 0.8815469015078649, \"Memory in Mb\": 88.71501064300537, \"Time in s\": 3002.626444 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.878231991776706, \"MicroF1\": 0.878231991776706, \"MacroF1\": 0.8774838611192476, \"Memory in Mb\": 93.73236656188963, \"Time in s\": 3282.43662 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8737648410570663, \"MicroF1\": 0.8737648410570663, \"MacroF1\": 0.8731746930338653, \"Memory in Mb\": 98.2464723587036, \"Time in s\": 3580.919978 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.872168164599272, \"MicroF1\": 0.872168164599272, \"MacroF1\": 0.8726091982990895, \"Memory in Mb\": 102.17141056060792, \"Time in s\": 3896.583194 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8693677456564054, \"MicroF1\": 0.8693677456564054, \"MacroF1\": 0.869586320653203, \"Memory in Mb\": 106.91088581085204, \"Time in s\": 4229.831886 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8653967364661391, \"MicroF1\": 0.8653967364661391, \"MacroF1\": 0.8650950015616714, \"Memory in Mb\": 111.61231708526611, \"Time in s\": 4581.249889 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8650507251310683, \"MicroF1\": 0.8650507251310683, 
\"MacroF1\": 0.8661026270223636, \"Memory in Mb\": 115.34651947021484, \"Time in s\": 4948.063453 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8667770785028155, \"MicroF1\": 0.8667770785028155, \"MacroF1\": 0.8680260482593213, \"Memory in Mb\": 118.24315452575684, \"Time in s\": 5329.445739 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8673805069986454, \"MicroF1\": 0.8673805069986454, \"MacroF1\": 0.8683176015232197, \"Memory in Mb\": 121.64087104797365, \"Time in s\": 5727.804435999999 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8630507196279303, \"MicroF1\": 0.8630507196279303, \"MacroF1\": 0.8630078198630903, \"Memory in Mb\": 126.92564392089844, \"Time in s\": 6152.632774 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8588761566272444, \"MicroF1\": 0.8588761566272444, \"MacroF1\": 0.8591617021179835, \"Memory in Mb\": 132.20891761779785, \"Time in s\": 6602.417036 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8546661086865547, \"MicroF1\": 0.8546661086865547, \"MacroF1\": 0.8551483908890608, \"Memory in Mb\": 137.6782627105713, \"Time in s\": 7072.757626 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.850539830755763, \"MicroF1\": 0.850539830755763, \"MacroF1\": 0.8506967692771638, \"Memory in Mb\": 139.40350437164307, \"Time in s\": 7574.584865 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8456934389785099, \"MicroF1\": 0.8456934389785099, \"MacroF1\": 0.845900693576748, \"Memory in Mb\": 144.32332038879397, \"Time in s\": 8103.947966 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8442983677789538, \"MicroF1\": 0.8442983677789538, \"MacroF1\": 0.8449597117669952, \"Memory in Mb\": 149.55280876159668, \"Time in s\": 8653.396068 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8462879241788769, \"MicroF1\": 0.8462879241788769, \"MacroF1\": 0.8472097835611015, \"Memory in Mb\": 154.20918083190918, \"Time in s\": 9220.958091 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8472851281504769, \"MicroF1\": 0.8472851281504769, \"MacroF1\": 0.8482490326871865, \"Memory in Mb\": 158.50969696044922, \"Time in s\": 9805.946569 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.845632333767927, \"MicroF1\": 0.845632333767927, \"MacroF1\": 0.8464906194356719, \"Memory in Mb\": 163.69990730285645, \"Time in s\": 10414.74471 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8470612265740693, \"MicroF1\": 0.8470612265740693, \"MacroF1\": 0.8481337525939465, \"Memory in Mb\": 167.61861419677734, \"Time in s\": 11041.283472 }, { \"step\": 19992, \"track\": \"Multiclass classification\", 
\"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8455805112300535, \"MicroF1\": 0.8455805112300535, \"MacroF1\": 0.8467102165017473, \"Memory in Mb\": 172.522611618042, \"Time in s\": 11691.217597 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8424922790332859, \"MicroF1\": 0.8424922790332859, \"MacroF1\": 0.8436347186891262, \"Memory in Mb\": 177.38492488861084, \"Time in s\": 12366.859336 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2457649726557289, \"Memory in Mb\": 4.181334495544434, \"Time in s\": 1.091398 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4835164835164835, \"MicroF1\": 0.4835164835164835, \"MacroF1\": 0.4934752395581889, \"Memory in Mb\": 4.184550285339356, \"Time in s\": 2.768304 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5528821792646677, \"Memory in Mb\": 4.184275627136231, \"Time in s\": 4.818241 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5956284153005464, \"MicroF1\": 0.5956284153005464, \"MacroF1\": 0.614143164890895, \"Memory in Mb\": 4.184859275817871, \"Time in s\": 7.25269 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.62882096069869, \"MicroF1\": 0.62882096069869, \"MacroF1\": 0.6441389332893815, \"Memory in Mb\": 4.184233665466309, \"Time in s\": 10.061851 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.64, \"MicroF1\": 0.64, \"MacroF1\": 0.6559607038460422, \"Memory in Mb\": 4.184771537780762, \"Time in s\": 13.254704 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6666666666666666, \"MicroF1\": 0.6666666666666666, \"MacroF1\": 0.6673617488913626, \"Memory in Mb\": 4.184481620788574, \"Time in s\": 16.809438 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6948228882833788, \"MicroF1\": 0.6948228882833788, \"MacroF1\": 0.6911959597548877, \"Memory in Mb\": 4.184699058532715, \"Time in s\": 20.719577 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.711864406779661, \"MicroF1\": 0.711864406779661, \"MacroF1\": 0.7079630503641953, \"Memory in Mb\": 4.185038566589356, \"Time in s\": 25.02075 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7124183006535948, \"MicroF1\": 0.7124183006535948, \"MacroF1\": 0.7065500352371009, \"Memory in Mb\": 4.184954643249512, \"Time in s\": 29.692393 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7207920792079208, \"MicroF1\": 0.7207920792079208, \"MacroF1\": 0.7127593158348896, \"Memory in Mb\": 4.184813499450684, \"Time in s\": 34.691523 }, { \"step\": 
552, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7259528130671506, \"MicroF1\": 0.7259528130671506, \"MacroF1\": 0.7192025503807162, \"Memory in Mb\": 4.184779167175293, \"Time in s\": 40.034435 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7319932998324958, \"MicroF1\": 0.7319932998324957, \"MacroF1\": 0.7251188986558661, \"Memory in Mb\": 4.185019493103027, \"Time in s\": 45.725681 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7309486780715396, \"MicroF1\": 0.7309486780715396, \"MacroF1\": 0.7259740406437202, \"Memory in Mb\": 4.184813499450684, \"Time in s\": 51.77256499999999 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7358490566037735, \"MicroF1\": 0.7358490566037735, \"MacroF1\": 0.7304359912942561, \"Memory in Mb\": 4.184943199157715, \"Time in s\": 58.14505499999999 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7374149659863946, \"MicroF1\": 0.7374149659863947, \"MacroF1\": 0.7331499347170709, \"Memory in Mb\": 4.185004234313965, \"Time in s\": 64.846683 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7426376440460948, \"MicroF1\": 0.7426376440460948, \"MacroF1\": 0.7385597120510639, \"Memory in Mb\": 4.184893608093262, \"Time in s\": 71.895225 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7436517533252721, \"MicroF1\": 0.7436517533252721, \"MacroF1\": 0.7412375783772317, \"Memory in Mb\": 4.184882164001465, \"Time in s\": 79.27328899999999 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7491408934707904, \"MicroF1\": 0.7491408934707904, \"MacroF1\": 0.7454343548790068, \"Memory in Mb\": 4.185431480407715, \"Time in s\": 86.96748099999999 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7486398258977149, \"MicroF1\": 0.7486398258977149, \"MacroF1\": 0.7441307384051415, \"Memory in Mb\": 4.185576438903809, \"Time in s\": 94.98591 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7492227979274612, \"MicroF1\": 0.749222797927461, \"MacroF1\": 0.7439306216964365, \"Memory in Mb\": 4.185370445251465, \"Time in s\": 103.316786 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7487636003956478, \"MicroF1\": 0.7487636003956478, \"MacroF1\": 0.7437900284473965, \"Memory in Mb\": 4.185484886169434, \"Time in s\": 111.977749 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750236518448439, \"MicroF1\": 0.7502365184484389, \"MacroF1\": 0.7448138061687654, \"Memory in Mb\": 4.185519218444824, \"Time in s\": 120.953125 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7524932003626473, \"MicroF1\": 
0.7524932003626473, \"MacroF1\": 0.7468314646869902, \"Memory in Mb\": 4.185484886169434, \"Time in s\": 130.25113499999998 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7554395126196692, \"MicroF1\": 0.7554395126196692, \"MacroF1\": 0.7493227137357602, \"Memory in Mb\": 4.185664176940918, \"Time in s\": 139.83544499999996 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7581589958158996, \"MicroF1\": 0.7581589958158996, \"MacroF1\": 0.7527652773681007, \"Memory in Mb\": 4.185568809509277, \"Time in s\": 149.73636899999997 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7574536663980661, \"MicroF1\": 0.7574536663980661, \"MacroF1\": 0.7525915384194215, \"Memory in Mb\": 4.185683250427246, \"Time in s\": 159.94850499999995 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7622377622377622, \"MicroF1\": 0.7622377622377621, \"MacroF1\": 0.7563448085202399, \"Memory in Mb\": 4.185866355895996, \"Time in s\": 170.48539899999994 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621905476369092, \"MicroF1\": 0.7621905476369092, \"MacroF1\": 0.7566636999776912, \"Memory in Mb\": 4.186026573181152, \"Time in s\": 181.34153899999995 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7635968092820885, \"MicroF1\": 0.7635968092820886, \"MacroF1\": 0.7587252257765656, \"Memory in Mb\": 4.1860761642456055, \"Time in s\": 192.51808799999995 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7663157894736842, \"MicroF1\": 0.7663157894736842, \"MacroF1\": 0.7609139797315135, \"Memory in Mb\": 4.186099052429199, \"Time in s\": 204.01178799999997 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709041468388851, \"MicroF1\": 0.7709041468388851, \"MacroF1\": 0.763768994920769, \"Memory in Mb\": 4.186240196228027, \"Time in s\": 215.81223399999996 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7719182597231378, \"MicroF1\": 0.7719182597231378, \"MacroF1\": 0.7639714255563932, \"Memory in Mb\": 4.186617851257324, \"Time in s\": 227.92141499999997 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7722328854766475, \"MicroF1\": 0.7722328854766475, \"MacroF1\": 0.765072133508071, \"Memory in Mb\": 4.186800956726074, \"Time in s\": 240.33772499999995 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7725295214418894, \"MicroF1\": 0.7725295214418892, \"MacroF1\": 0.764505787280341, \"Memory in Mb\": 4.186892509460449, \"Time in s\": 253.082857 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7716012084592145, \"MicroF1\": 0.7716012084592145, \"MacroF1\": 0.7634170612719107, \"Memory in Mb\": 
4.1867780685424805, \"Time in s\": 266.155857 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7713109935332157, \"MicroF1\": 0.7713109935332157, \"MacroF1\": 0.7652815676598499, \"Memory in Mb\": 4.187075614929199, \"Time in s\": 279.522325 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.77389811104751, \"MicroF1\": 0.77389811104751, \"MacroF1\": 0.7674409436090757, \"Memory in Mb\": 4.187258720397949, \"Time in s\": 293.187056 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752370329057445, \"MicroF1\": 0.7752370329057446, \"MacroF1\": 0.7674318582149376, \"Memory in Mb\": 4.1872968673706055, \"Time in s\": 307.180551 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7765089722675367, \"MicroF1\": 0.7765089722675368, \"MacroF1\": 0.7688731808749575, \"Memory in Mb\": 4.187228202819824, \"Time in s\": 321.497423 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7750663129973475, \"MicroF1\": 0.7750663129973475, \"MacroF1\": 0.7678921362145585, \"Memory in Mb\": 4.187155723571777, \"Time in s\": 336.11504499999995 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752459865354738, \"MicroF1\": 0.7752459865354739, \"MacroF1\": 0.7671636716269125, \"Memory in Mb\": 4.187251091003418, \"Time in s\": 351.053915 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7759231158320687, \"MicroF1\": 0.7759231158320687, \"MacroF1\": 0.7670573130332382, \"Memory in Mb\": 4.187151908874512, \"Time in s\": 366.316945 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7775580820563519, \"MicroF1\": 0.7775580820563519, \"MacroF1\": 0.7671264358471986, \"Memory in Mb\": 4.187129020690918, \"Time in s\": 381.896141 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.77670372160464, \"MicroF1\": 0.7767037216046399, \"MacroF1\": 0.7665050383810529, \"Memory in Mb\": 4.187205314636231, \"Time in s\": 397.783945 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7773049645390071, \"MicroF1\": 0.7773049645390071, \"MacroF1\": 0.7663404166149341, \"Memory in Mb\": 4.187205314636231, \"Time in s\": 413.995271 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7783433595557612, \"MicroF1\": 0.7783433595557612, \"MacroF1\": 0.7669657147488861, \"Memory in Mb\": 4.187277793884277, \"Time in s\": 430.524022 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.780244676030811, \"MicroF1\": 0.780244676030811, \"MacroF1\": 0.7678552364681829, \"Memory in Mb\": 4.187273979187012, \"Time in s\": 447.387318 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.7776298268974701, \"MicroF1\": 0.7776298268974701, \"MacroF1\": 0.7652407320979201, \"Memory in Mb\": 4.187224388122559, \"Time in s\": 464.558569 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7768595041322314, \"MicroF1\": 0.7768595041322314, \"MacroF1\": 0.7644610611003249, \"Memory in Mb\": 4.18729305267334, \"Time in s\": 482.036228 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5970323052762561, \"Memory in Mb\": 6.583989143371582, \"Time in s\": 11.628861 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5890580890213498, \"Memory in Mb\": 6.584485054016113, \"Time in s\": 33.423105 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6157246605620461, \"MicroF1\": 0.6157246605620461, \"MacroF1\": 0.5802533923244892, \"Memory in Mb\": 6.58519458770752, \"Time in s\": 65.495981 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6107032914989344, \"MicroF1\": 0.6107032914989344, \"MacroF1\": 0.574850135712032, \"Memory in Mb\": 6.585576057434082, \"Time in s\": 107.816588 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.615078613373745, \"MicroF1\": 0.615078613373745, \"MacroF1\": 0.5779184071248228, \"Memory in Mb\": 6.5863847732543945, \"Time in s\": 160.258759 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6091554853985793, \"MicroF1\": 0.6091554853985793, \"MacroF1\": 0.5734262289926554, \"Memory in Mb\": 6.586209297180176, \"Time in s\": 222.866435 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6058720064943851, \"MicroF1\": 0.6058720064943851, \"MacroF1\": 0.5704339658550047, \"Memory in Mb\": 6.585629463195801, \"Time in s\": 295.76759400000003 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6070794364863265, \"MicroF1\": 0.6070794364863265, \"MacroF1\": 0.5712261057542335, \"Memory in Mb\": 6.585507392883301, \"Time in s\": 378.90789100000006 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6040197832263495, \"MicroF1\": 0.6040197832263495, \"MacroF1\": 0.567883906637128, \"Memory in Mb\": 6.585629463195801, \"Time in s\": 472.31115100000005 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6035609432711431, \"MicroF1\": 0.6035609432711431, \"MacroF1\": 0.5674913890030829, \"Memory in Mb\": 6.5859880447387695, \"Time in s\": 575.935454 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6006026689625484, \"MicroF1\": 0.6006026689625484, \"MacroF1\": 0.5651886352361905, \"Memory in Mb\": 6.585965156555176, \"Time in s\": 689.792083 }, { \"step\": 12672, \"track\": \"Multiclass 
classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6032673032909794, \"MicroF1\": 0.6032673032909794, \"MacroF1\": 0.5704386423232538, \"Memory in Mb\": 6.585919380187988, \"Time in s\": 813.953016 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6147738034530488, \"MicroF1\": 0.6147738034530488, \"MacroF1\": 0.5955647708468143, \"Memory in Mb\": 6.584591865539551, \"Time in s\": 948.29318 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6052222147060813, \"MicroF1\": 0.6052222147060813, \"MacroF1\": 0.586323857604342, \"Memory in Mb\": 6.583569526672363, \"Time in s\": 1092.793426 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.570427425973862, \"MicroF1\": 0.570427425973862, \"MacroF1\": 0.5530515395071289, \"Memory in Mb\": 6.584805488586426, \"Time in s\": 1247.37522 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5441254809115122, \"MicroF1\": 0.5441254809115122, \"MacroF1\": 0.5274626123277456, \"Memory in Mb\": 6.5833024978637695, \"Time in s\": 1412.138663 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5247061445044844, \"MicroF1\": 0.5247061445044844, \"MacroF1\": 0.5077849244821269, \"Memory in Mb\": 6.58421802520752, \"Time in s\": 1587.005582 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5143368232756353, \"MicroF1\": 0.5143368232756353, \"MacroF1\": 0.4945891921842289, \"Memory in Mb\": 5.490016937255859, \"Time in s\": 1771.530278 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5203110202860988, \"MicroF1\": 0.5203110202860988, \"MacroF1\": 0.4996705647403201, \"Memory in Mb\": 13.561949729919434, \"Time in s\": 1966.283082 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5285288129172783, \"MicroF1\": 0.5285288129172783, \"MacroF1\": 0.5082662721949724, \"Memory in Mb\": 14.33274745941162, \"Time in s\": 2175.200087 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5345208568207441, \"MicroF1\": 0.5345208568207441, \"MacroF1\": 0.5149076376433322, \"Memory in Mb\": 14.875703811645508, \"Time in s\": 2397.3423569999995 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5431104989023288, \"MicroF1\": 0.5431104989023288, \"MacroF1\": 0.5234265380967914, \"Memory in Mb\": 14.787662506103516, \"Time in s\": 2632.304625 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.550253221888253, \"MicroF1\": 0.550253221888253, \"MacroF1\": 0.5298759738824472, \"Memory in Mb\": 16.32009983062744, \"Time in s\": 2880.082024 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5564455668231859, \"MicroF1\": 0.5564455668231859, \"MacroF1\": 0.5355827199778521, \"Memory in Mb\": 16.310463905334473, \"Time in 
s\": 3141.997661 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5614985416114247, \"MicroF1\": 0.5614985416114247, \"MacroF1\": 0.5398687013453174, \"Memory in Mb\": 16.303704261779785, \"Time in s\": 3417.905768 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5647787288289929, \"MicroF1\": 0.5647787288289929, \"MacroF1\": 0.5421799635248432, \"Memory in Mb\": 15.336288452148438, \"Time in s\": 3708.118695 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5680965241485743, \"MicroF1\": 0.5680965241485743, \"MacroF1\": 0.5473162851674372, \"Memory in Mb\": 13.81827449798584, \"Time in s\": 4011.685358 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5701626813677411, \"MicroF1\": 0.5701626813677411, \"MacroF1\": 0.5529817475842932, \"Memory in Mb\": 11.429868698120115, \"Time in s\": 4328.316425 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5724455474643242, \"MicroF1\": 0.5724455474643242, \"MacroF1\": 0.5586057023406553, \"Memory in Mb\": 9.4625883102417, \"Time in s\": 4656.70214 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5750181508254679, \"MicroF1\": 0.5750181508254679, \"MacroF1\": 0.5636300266647484, \"Memory in Mb\": 9.46137523651123, \"Time in s\": 4996.240557 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5782190316175347, \"MicroF1\": 0.5782190316175347, \"MacroF1\": 0.5684825891024486, \"Memory in Mb\": 9.4603910446167, \"Time in s\": 5346.796886 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5756858335059631, \"MicroF1\": 0.5756858335059631, \"MacroF1\": 0.5663669622675245, \"Memory in Mb\": 7.919375419616699, \"Time in s\": 5710.001071 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5754871294516027, \"MicroF1\": 0.5754871294516027, \"MacroF1\": 0.5660193869557423, \"Memory in Mb\": 7.260138511657715, \"Time in s\": 6084.365793 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5764142272233518, \"MicroF1\": 0.5764142272233518, \"MacroF1\": 0.5665362650344427, \"Memory in Mb\": 6.60003662109375, \"Time in s\": 6469.590309 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.575908439081144, \"MicroF1\": 0.575908439081144, \"MacroF1\": 0.5657420280651625, \"Memory in Mb\": 6.597938537597656, \"Time in s\": 6865.284174 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5767723267131396, \"MicroF1\": 0.5767723267131396, \"MacroF1\": 0.5661330182942309, \"Memory in Mb\": 6.597076416015625, \"Time in s\": 7271.517818 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5764889560031737, \"MicroF1\": 0.5764889560031737, \"MacroF1\": 
0.5659501482422926, \"Memory in Mb\": 6.593917846679688, \"Time in s\": 7688.178552 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5734792035287961, \"MicroF1\": 0.5734792035287961, \"MacroF1\": 0.5636824355748769, \"Memory in Mb\": 8.576746940612793, \"Time in s\": 8115.281794 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5726634776485443, \"MicroF1\": 0.5726634776485443, \"MacroF1\": 0.563417094879665, \"Memory in Mb\": 8.779536247253418, \"Time in s\": 8552.341759 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5723383602831507, \"MicroF1\": 0.5723383602831507, \"MacroF1\": 0.5635995837049609, \"Memory in Mb\": 10.21227741241455, \"Time in s\": 8998.890652 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5718212264695692, \"MicroF1\": 0.5718212264695692, \"MacroF1\": 0.5636175088230181, \"Memory in Mb\": 10.21111011505127, \"Time in s\": 9455.260723 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.571125791977633, \"MicroF1\": 0.571125791977633, \"MacroF1\": 0.5633830644644046, \"Memory in Mb\": 10.209759712219238, \"Time in s\": 9921.572486 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5712555332878191, \"MicroF1\": 0.5712555332878191, \"MacroF1\": 0.5638292127585011, \"Memory in Mb\": 11.4169340133667, \"Time in s\": 10398.038321 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5728429072595399, \"MicroF1\": 0.5728429072595399, \"MacroF1\": 0.565898409914518, \"Memory in Mb\": 12.449102401733398, \"Time in s\": 10884.980897 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5768850354595004, \"MicroF1\": 0.5768850354595004, \"MacroF1\": 0.57039744062367, \"Memory in Mb\": 16.244935989379883, \"Time in s\": 11383.832017 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5828718476582604, \"MicroF1\": 0.5828718476582604, \"MacroF1\": 0.5764217258661826, \"Memory in Mb\": 15.377230644226074, \"Time in s\": 11896.186788 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5890471681005823, \"MicroF1\": 0.5890471681005823, \"MacroF1\": 0.5823842044431963, \"Memory in Mb\": 14.950640678405762, \"Time in s\": 12421.939031 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.594728431353207, \"MicroF1\": 0.594728431353207, \"MacroF1\": 0.5876258810149467, \"Memory in Mb\": 12.564711570739746, \"Time in s\": 12959.806662 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6007382641130201, \"MicroF1\": 0.6007382641130201, \"MacroF1\": 0.5930524375976366, \"Memory in Mb\": 11.987659454345703, \"Time in s\": 13508.895544 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 
0.606053144945927, \"MicroF1\": 0.606053144945927, \"MacroF1\": 0.5982224401760299, \"Memory in Mb\": 3.750063896179199, \"Time in s\": 14067.159334999998 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.2869701385498047, \"Time in s\": 1.329477 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411042944785276, \"MicroF1\": 0.9411042944785276, \"MacroF1\": 0.7377235942917068, \"Memory in Mb\": 3.233952522277832, \"Time in s\": 4.406974 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8879803761242846, \"MicroF1\": 0.8879803761242846, \"MacroF1\": 0.873420796574987, \"Memory in Mb\": 4.178282737731934, \"Time in s\": 9.866805 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8988350705088902, \"MicroF1\": 0.8988350705088902, \"MacroF1\": 0.8792834531664682, \"Memory in Mb\": 5.13068962097168, \"Time in s\": 18.093903 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8950465914664051, \"MicroF1\": 0.8950465914664051, \"MacroF1\": 0.8828407845486113, \"Memory in Mb\": 6.191357612609863, \"Time in s\": 29.527087 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8561503882304863, \"MicroF1\": 0.8561503882304863, \"MacroF1\": 0.8521381720173345, \"Memory in Mb\": 7.13577938079834, \"Time in s\": 44.765436 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8623467600700525, \"MicroF1\": 0.8623467600700525, \"MacroF1\": 0.8461129037988256, \"Memory in Mb\": 8.082098007202148, \"Time in s\": 64.393699 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8528961078761875, \"MicroF1\": 0.8528961078761875, \"MacroF1\": 0.828204357625989, \"Memory in Mb\": 9.02734088897705, \"Time in s\": 89.05171 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8428221193135386, \"MicroF1\": 0.8428221193135386, \"MacroF1\": 0.8381978174360706, \"Memory in Mb\": 9.972960472106934, \"Time in s\": 119.28991 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8350085805344447, \"MicroF1\": 0.8350085805344447, \"MacroF1\": 0.8208915725311207, \"Memory in Mb\": 11.171079635620115, \"Time in s\": 155.799996 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.822821484287943, \"MicroF1\": 0.822821484287943, \"MacroF1\": 0.832874806475175, \"Memory in Mb\": 12.116948127746582, \"Time in s\": 198.957143 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8212461695607763, \"MicroF1\": 0.8212461695607763, \"MacroF1\": 0.8275900848879882, \"Memory in Mb\": 13.061814308166504, \"Time in s\": 249.493693 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": 
\"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8178389590797661, \"MicroF1\": 0.8178389590797661, \"MacroF1\": 0.8022229037941512, \"Memory in Mb\": 14.007365226745604, \"Time in s\": 307.913741 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7974085098931886, \"MicroF1\": 0.7974085098931886, \"MacroF1\": 0.8005324816804641, \"Memory in Mb\": 14.95396614074707, \"Time in s\": 374.813654 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7947377022389279, \"MicroF1\": 0.7947377022389279, \"MacroF1\": 0.7763699164747573, \"Memory in Mb\": 15.899590492248535, \"Time in s\": 450.788506 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7695725448138502, \"MicroF1\": 0.7695725448138502, \"MacroF1\": 0.7646092489325799, \"Memory in Mb\": 16.84642791748047, \"Time in s\": 536.475639 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7614996395097332, \"MicroF1\": 0.7614996395097332, \"MacroF1\": 0.7633186803137438, \"Memory in Mb\": 17.791536331176758, \"Time in s\": 632.653421 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.770393572109492, \"MicroF1\": 0.770393572109492, \"MacroF1\": 0.7679684376178252, \"Memory in Mb\": 18.753963470458984, \"Time in s\": 739.829913 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7709972906721714, \"MicroF1\": 0.7709972906721715, \"MacroF1\": 0.7694364393340193, \"Memory in Mb\": 19.70015239715576, \"Time in s\": 858.3068800000001 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7739919107733791, \"MicroF1\": 0.7739919107733791, \"MacroF1\": 0.7702264725589797, \"Memory in Mb\": 20.646059036254883, \"Time in s\": 989.0801780000002 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.770631492938018, \"MicroF1\": 0.770631492938018, \"MacroF1\": 0.7706502591714904, \"Memory in Mb\": 22.072673797607425, \"Time in s\": 1132.67323 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7691364902506964, \"MicroF1\": 0.7691364902506964, \"MacroF1\": 0.7697475673922982, \"Memory in Mb\": 23.017619132995605, \"Time in s\": 1289.930532 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7679846530960247, \"MicroF1\": 0.7679846530960248, \"MacroF1\": 0.7675735514139922, \"Memory in Mb\": 23.96499538421631, \"Time in s\": 1461.233929 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7634562353181493, \"MicroF1\": 0.7634562353181493, \"MacroF1\": 0.7626887405791724, \"Memory in Mb\": 24.91041660308838, \"Time in s\": 1647.051427 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7552701245220119, \"MicroF1\": 0.7552701245220118, \"MacroF1\": 0.7474447650479976, \"Memory in Mb\": 25.855456352233887, \"Time in s\": 
1848.111209 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.734326388234185, \"MicroF1\": 0.734326388234185, \"MacroF1\": 0.7218544335091276, \"Memory in Mb\": 26.80277729034424, \"Time in s\": 2064.993893 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.727099409895597, \"MicroF1\": 0.727099409895597, \"MacroF1\": 0.7232704418570853, \"Memory in Mb\": 27.74752426147461, \"Time in s\": 2298.467217 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7203011468090694, \"MicroF1\": 0.7203011468090693, \"MacroF1\": 0.7069709690618045, \"Memory in Mb\": 28.693537712097168, \"Time in s\": 2549.106467 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7107598681430141, \"MicroF1\": 0.7107598681430141, \"MacroF1\": 0.7032019097144009, \"Memory in Mb\": 29.638681411743164, \"Time in s\": 2817.5197820000003 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7152545142577008, \"MicroF1\": 0.7152545142577007, \"MacroF1\": 0.7117335483783439, \"Memory in Mb\": 30.584209442138672, \"Time in s\": 3104.4910310000005 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7121056377006405, \"MicroF1\": 0.7121056377006404, \"MacroF1\": 0.7043178518121461, \"Memory in Mb\": 31.53076934814453, \"Time in s\": 3410.5360360000004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7005744925315971, \"MicroF1\": 0.7005744925315971, \"MacroF1\": 0.6932522175542292, \"Memory in Mb\": 32.476640701293945, \"Time in s\": 3735.542008 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6985070192379114, \"MicroF1\": 0.6985070192379114, \"MacroF1\": 0.6945196760058037, \"Memory in Mb\": 33.421990394592285, \"Time in s\": 4080.277425 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6980751207555331, \"MicroF1\": 0.6980751207555331, \"MacroF1\": 0.6949558493849793, \"Memory in Mb\": 34.36870098114014, \"Time in s\": 4445.365121 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6936760277330345, \"MicroF1\": 0.6936760277330345, \"MacroF1\": 0.6891645690411646, \"Memory in Mb\": 35.313669204711914, \"Time in s\": 4831.313826 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6963300878327773, \"MicroF1\": 0.6963300878327773, \"MacroF1\": 0.6946500105809528, \"Memory in Mb\": 36.259453773498535, \"Time in s\": 5238.529619 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7024180192116595, \"MicroF1\": 0.7024180192116595, \"MacroF1\": 0.7008836593188431, \"Memory in Mb\": 37.20665740966797, \"Time in s\": 5667.803474 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.702509191769335, 
\"MicroF1\": 0.702509191769335, \"MacroF1\": 0.6995855030221436, \"Memory in Mb\": 38.15153884887695, \"Time in s\": 6119.5687720000005 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6934824963861479, \"MicroF1\": 0.6934824963861479, \"MacroF1\": 0.687175788748239, \"Memory in Mb\": 39.09754180908203, \"Time in s\": 6594.573754000001 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6848458851645322, \"MicroF1\": 0.6848458851645322, \"MacroF1\": 0.6802460349069701, \"Memory in Mb\": 40.04375648498535, \"Time in s\": 7093.395211000001 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6819513361630896, \"MicroF1\": 0.6819513361630896, \"MacroF1\": 0.6795788912922722, \"Memory in Mb\": 40.98903942108154, \"Time in s\": 7616.277784000001 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6779107090749927, \"MicroF1\": 0.6779107090749927, \"MacroF1\": 0.6747648209169417, \"Memory in Mb\": 42.91780757904053, \"Time in s\": 8164.158576000001 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6705808584620646, \"MicroF1\": 0.6705808584620646, \"MacroF1\": 0.6680341530684186, \"Memory in Mb\": 43.864423751831055, \"Time in s\": 8737.125467000002 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6695448721519692, \"MicroF1\": 0.6695448721519692, \"MacroF1\": 0.6687363294804706, \"Memory in Mb\": 44.8110933303833, \"Time in s\": 9335.759485000002 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.674927828313089, \"MicroF1\": 0.674927828313089, \"MacroF1\": 0.6747300618557481, \"Memory in Mb\": 45.75679397583008, \"Time in s\": 9961.084735000002 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6799701603879149, \"MicroF1\": 0.6799701603879149, \"MacroF1\": 0.6801519832282531, \"Memory in Mb\": 46.703369140625, \"Time in s\": 10614.122071000002 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6720730117340287, \"MicroF1\": 0.6720730117340287, \"MacroF1\": 0.6711666831354974, \"Memory in Mb\": 47.648451805114746, \"Time in s\": 11294.958186000002 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6760455497114845, \"MicroF1\": 0.6760455497114845, \"MacroF1\": 0.6762772840246767, \"Memory in Mb\": 48.59414768218994, \"Time in s\": 12004.137364000002 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6715521984893202, \"MicroF1\": 0.6715521984893202, \"MacroF1\": 0.6718362805013157, \"Memory in Mb\": 49.5405502319336, \"Time in s\": 12741.947206000004 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6679739202902103, \"MicroF1\": 0.6679739202902103, \"MacroF1\": 0.6688529665037395, \"Memory in Mb\": 50.48721218109131, \"Time in s\": 
13509.112779000005 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3777777777777777, \"MicroF1\": 0.3777777777777777, \"MacroF1\": 0.2811210847975554, \"Memory in Mb\": 4.12965202331543, \"Time in s\": 2.162623 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5164835164835165, \"MicroF1\": 0.5164835164835165, \"MacroF1\": 0.5316649744849407, \"Memory in Mb\": 4.130231857299805, \"Time in s\": 5.385739 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5547445255474452, \"MicroF1\": 0.5547445255474452, \"MacroF1\": 0.5804654781117262, \"Memory in Mb\": 4.130353927612305, \"Time in s\": 9.498091 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6394923756219437, \"Memory in Mb\": 4.130964279174805, \"Time in s\": 14.44781 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6506550218340611, \"MicroF1\": 0.6506550218340611, \"MacroF1\": 0.66859135700569, \"Memory in Mb\": 4.130964279174805, \"Time in s\": 20.213143 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6618181818181819, \"MicroF1\": 0.6618181818181819, \"MacroF1\": 0.6795855359270878, \"Memory in Mb\": 4.131082534790039, \"Time in s\": 26.807818 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6853582554517134, \"MicroF1\": 0.6853582554517134, \"MacroF1\": 0.6872635633687633, \"Memory in Mb\": 4.131624221801758, \"Time in s\": 34.210100999999995 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7111716621253406, \"MicroF1\": 0.7111716621253404, \"MacroF1\": 0.7098417316927395, \"Memory in Mb\": 4.131597518920898, \"Time in s\": 42.42663699999999 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7215496368038741, \"MicroF1\": 0.7215496368038742, \"MacroF1\": 0.7201557312728714, \"Memory in Mb\": 4.13151741027832, \"Time in s\": 51.47481599999999 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7211328976034859, \"MicroF1\": 0.721132897603486, \"MacroF1\": 0.7175330036146421, \"Memory in Mb\": 4.131570816040039, \"Time in s\": 61.34488599999999 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7287128712871287, \"MicroF1\": 0.7287128712871287, \"MacroF1\": 0.7233455022590812, \"Memory in Mb\": 4.131570816040039, \"Time in s\": 72.04824799999999 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7295825771324864, \"MicroF1\": 0.7295825771324864, \"MacroF1\": 0.7255599965917697, \"Memory in Mb\": 4.131490707397461, \"Time in s\": 83.59783199999998 }, { 
\"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7353433835845896, \"MicroF1\": 0.7353433835845896, \"MacroF1\": 0.7308494254186014, \"Memory in Mb\": 4.131513595581055, \"Time in s\": 95.96874199999998 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7340590979782271, \"MicroF1\": 0.7340590979782271, \"MacroF1\": 0.7314183982762247, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 109.15469699999996 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.737300435413643, \"MicroF1\": 0.737300435413643, \"MacroF1\": 0.7343909641298695, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 123.16042199999995 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387755102040816, \"MicroF1\": 0.7387755102040816, \"MacroF1\": 0.7369557659594496, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 138.00293199999996 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7439180537772087, \"MicroF1\": 0.7439180537772088, \"MacroF1\": 0.7419020281650245, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 153.67249999999996 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7436517533252721, \"MicroF1\": 0.7436517533252721, \"MacroF1\": 0.7432199627682998, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 170.15907299999995 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7502863688430699, \"MicroF1\": 0.7502863688430699, \"MacroF1\": 0.7482089866208982, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 187.48903599999997 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750816104461371, \"MicroF1\": 0.750816104461371, \"MacroF1\": 0.7477650187313973, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 205.64141899999996 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7512953367875648, \"MicroF1\": 0.7512953367875648, \"MacroF1\": 0.747322646811651, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 224.60247399999992 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7507418397626113, \"MicroF1\": 0.7507418397626113, \"MacroF1\": 0.7469783619055548, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 244.38522099999992 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7530747398297067, \"MicroF1\": 0.7530747398297066, \"MacroF1\": 0.7482363934596314, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 264.9944789999999 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7552130553037172, \"MicroF1\": 0.7552130553037172, \"MacroF1\": 0.750118495060715, \"Memory in Mb\": 
4.132123947143555, \"Time in s\": 286.4318919999999 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7571801566579635, \"MicroF1\": 0.7571801566579635, \"MacroF1\": 0.7516199800653578, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 308.6933539999999 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7598326359832636, \"MicroF1\": 0.7598326359832636, \"MacroF1\": 0.7548841797367704, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 331.8138849999999 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7598710717163578, \"MicroF1\": 0.7598710717163577, \"MacroF1\": 0.7553301531902636, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 355.7543559999999 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7645687645687645, \"MicroF1\": 0.7645687645687647, \"MacroF1\": 0.7590078532621816, \"Memory in Mb\": 4.132734298706055, \"Time in s\": 380.5041089999999 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7644411102775694, \"MicroF1\": 0.7644411102775694, \"MacroF1\": 0.7591993978414527, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 406.0948849999999 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650471356055112, \"MicroF1\": 0.7650471356055112, \"MacroF1\": 0.7601575050520946, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 432.504415 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7670175438596492, \"MicroF1\": 0.7670175438596492, \"MacroF1\": 0.7613339877221927, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 459.75425999999993 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7715839564921821, \"MicroF1\": 0.7715839564921821, \"MacroF1\": 0.76413964752182, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 487.813313 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7732366512854317, \"MicroF1\": 0.7732366512854317, \"MacroF1\": 0.7648275341801108, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 516.6922259999999 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7735124760076776, \"MicroF1\": 0.7735124760076776, \"MacroF1\": 0.7657569341108763, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 546.3975239999999 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7737725295214419, \"MicroF1\": 0.7737725295214419, \"MacroF1\": 0.7651494083475014, \"Memory in Mb\": 4.13282585144043, \"Time in s\": 576.9286479999998 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7740181268882175, \"MicroF1\": 0.7740181268882175, \"MacroF1\": 
0.7654813489818475, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 608.2971799999998 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7730746619635509, \"MicroF1\": 0.7730746619635509, \"MacroF1\": 0.7664930279619061, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 640.4789209999998 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7756153405838581, \"MicroF1\": 0.7756153405838581, \"MacroF1\": 0.7686072256536652, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 673.4948829999998 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7769102063580591, \"MicroF1\": 0.7769102063580591, \"MacroF1\": 0.7685414235990153, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 707.3642379999999 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7781402936378466, \"MicroF1\": 0.7781402936378466, \"MacroF1\": 0.7699957723931324, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 742.0792459999999 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7761273209549071, \"MicroF1\": 0.7761273209549071, \"MacroF1\": 0.7684985598909853, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 777.6304799999999 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762817193164163, \"MicroF1\": 0.7762817193164163, \"MacroF1\": 0.767743441804642, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 814.0157899999999 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7774405665149215, \"MicroF1\": 0.7774405665149215, \"MacroF1\": 0.7684788817649146, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 851.2372119999999 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7790410281759763, \"MicroF1\": 0.7790410281759763, \"MacroF1\": 0.7689103339153599, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 889.2963629999999 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7786370227162881, \"MicroF1\": 0.7786370227162881, \"MacroF1\": 0.7686288077529282, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 928.20257 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7791962174940898, \"MicroF1\": 0.7791962174940898, \"MacroF1\": 0.768391950800897, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 967.945094 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7801943544655252, \"MicroF1\": 0.7801943544655253, \"MacroF1\": 0.768962628827985, \"Memory in Mb\": 4.132776260375977, \"Time in s\": 1008.503462 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7820570910738559, \"MicroF1\": 
0.7820570910738559, \"MacroF1\": 0.7698068761587117, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1049.8865549999998 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7789613848202397, \"MicroF1\": 0.7789613848202397, \"MacroF1\": 0.7667173742344939, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1092.1021469999998 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7781644193127447, \"MicroF1\": 0.7781644193127447, \"MacroF1\": 0.7659138381656089, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1135.1554539999995 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6218009478672986, \"MicroF1\": 0.6218009478672986, \"MacroF1\": 0.5857016652718547, \"Memory in Mb\": 6.522056579589844, \"Time in s\": 30.289418 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6196115585030791, \"MicroF1\": 0.6196115585030791, \"MacroF1\": 0.5856756432415232, \"Memory in Mb\": 10.389650344848633, \"Time in s\": 88.669185 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.628986422481844, \"MicroF1\": 0.628986422481844, \"MacroF1\": 0.5949930595607558, \"Memory in Mb\": 19.16711711883545, \"Time in s\": 174.550284 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6294103717736207, \"MicroF1\": 0.6294103717736207, \"MacroF1\": 0.5952675443708706, \"Memory in Mb\": 19.668034553527832, \"Time in s\": 287.918965 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6364841826103429, \"MicroF1\": 0.6364841826103429, \"MacroF1\": 0.5994911272790604, \"Memory in Mb\": 18.96163558959961, \"Time in s\": 428.854975 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6352012628255722, \"MicroF1\": 0.6352012628255722, \"MacroF1\": 0.5993891820807257, \"Memory in Mb\": 20.14603328704834, \"Time in s\": 597.190787 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.638749830875389, \"MicroF1\": 0.638749830875389, \"MacroF1\": 0.6030343276880051, \"Memory in Mb\": 21.10132884979248, \"Time in s\": 793.049033 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6405824553095774, \"MicroF1\": 0.6405824553095774, \"MacroF1\": 0.6028521616895871, \"Memory in Mb\": 24.15276908874512, \"Time in s\": 1016.521492 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6449542249815847, \"MicroF1\": 0.6449542249815847, \"MacroF1\": 0.6055705492028415, \"Memory in Mb\": 24.86981773376465, \"Time in s\": 1266.726445 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6485462638507434, \"MicroF1\": 0.6485462638507434, \"MacroF1\": 0.6081614166360886, \"Memory in Mb\": 
28.971991539001465, \"Time in s\": 1544.137884 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6490744726646578, \"MicroF1\": 0.6490744726646578, \"MacroF1\": 0.6078786452761632, \"Memory in Mb\": 31.018654823303223, \"Time in s\": 1848.95866 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6514876489621971, \"MicroF1\": 0.6514876489621971, \"MacroF1\": 0.6111938480023121, \"Memory in Mb\": 35.39500713348389, \"Time in s\": 2179.477442 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6707947840023312, \"MicroF1\": 0.6707947840023312, \"MacroF1\": 0.6607574394823456, \"Memory in Mb\": 17.66313648223877, \"Time in s\": 2527.754593 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814584319826829, \"MicroF1\": 0.6814584319826829, \"MacroF1\": 0.6724584381879511, \"Memory in Mb\": 11.128533363342283, \"Time in s\": 2896.354015 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6762421870067554, \"MicroF1\": 0.6762421870067554, \"MacroF1\": 0.6688785181435096, \"Memory in Mb\": 14.811795234680176, \"Time in s\": 3290.113809 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6741639538324948, \"MicroF1\": 0.6741639538324948, \"MacroF1\": 0.6676833597101233, \"Memory in Mb\": 15.36542510986328, \"Time in s\": 3710.779607 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.670491894601972, \"MicroF1\": 0.670491894601972, \"MacroF1\": 0.6643621029883554, \"Memory in Mb\": 15.98740005493164, \"Time in s\": 4158.727334 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6754353659178197, \"MicroF1\": 0.6754353659178197, \"MacroF1\": 0.6656526175716114, \"Memory in Mb\": 17.100504875183105, \"Time in s\": 4627.925426 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6800079748791308, \"MicroF1\": 0.6800079748791308, \"MacroF1\": 0.6670489534490986, \"Memory in Mb\": 26.370519638061523, \"Time in s\": 5118.761149 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6835550925706709, \"MicroF1\": 0.6835550925706709, \"MacroF1\": 0.6685883462655132, \"Memory in Mb\": 32.78877353668213, \"Time in s\": 5635.605831 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6869447576099211, \"MicroF1\": 0.6869447576099211, \"MacroF1\": 0.6701495347804184, \"Memory in Mb\": 36.29740715026856, \"Time in s\": 6178.169973 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6912745899875167, \"MicroF1\": 0.6912745899875167, \"MacroF1\": 0.6726358783249661, \"Memory in Mb\": 38.26123523712158, \"Time in s\": 6746.46947 }, { \"step\": 24288, \"track\": 
\"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6940338452670153, \"MicroF1\": 0.6940338452670153, \"MacroF1\": 0.673442702110033, \"Memory in Mb\": 39.100372314453125, \"Time in s\": 7340.085466 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6976679951071302, \"MicroF1\": 0.6976679951071302, \"MacroF1\": 0.67525701759611, \"Memory in Mb\": 42.24958515167236, \"Time in s\": 7958.602362 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7000643963786507, \"MicroF1\": 0.7000643963786507, \"MacroF1\": 0.6759116206749555, \"Memory in Mb\": 41.52747917175293, \"Time in s\": 8602.02664 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7027135312329266, \"MicroF1\": 0.7027135312329266, \"MacroF1\": 0.6765494742782628, \"Memory in Mb\": 43.56198120117188, \"Time in s\": 9269.998307 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7018343797130931, \"MicroF1\": 0.7018343797130931, \"MacroF1\": 0.6771545550561098, \"Memory in Mb\": 24.23386573791504, \"Time in s\": 9962.817287 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7013224202658369, \"MicroF1\": 0.7013224202658369, \"MacroF1\": 0.681362451564682, \"Memory in Mb\": 5.156903266906738, \"Time in s\": 10676.641313 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.699702837736342, \"MicroF1\": 0.699702837736342, \"MacroF1\": 0.6839521261644582, \"Memory in Mb\": 8.359548568725586, \"Time in s\": 11409.958608 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6993907635973358, \"MicroF1\": 0.6993907635973358, \"MacroF1\": 0.6874853197903658, \"Memory in Mb\": 12.837088584899902, \"Time in s\": 12162.299998 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7005651443409195, \"MicroF1\": 0.7005651443409195, \"MacroF1\": 0.692127614099415, \"Memory in Mb\": 14.392640113830566, \"Time in s\": 12933.190651 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6971678849397769, \"MicroF1\": 0.6971678849397769, \"MacroF1\": 0.6903104823999882, \"Memory in Mb\": 22.11440753936768, \"Time in s\": 13726.605059 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6941487072057853, \"MicroF1\": 0.6941487072057853, \"MacroF1\": 0.6871648754350796, \"Memory in Mb\": 16.369569778442383, \"Time in s\": 14546.496494 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6917527783193606, \"MicroF1\": 0.6917527783193606, \"MacroF1\": 0.684473708604621, \"Memory in Mb\": 15.783265113830566, \"Time in s\": 15393.169285 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": 
\"Insects\", \"Accuracy\": 0.6883303119673151, \"MicroF1\": 0.6883303119673151, \"MacroF1\": 0.6807777972894504, \"Memory in Mb\": 18.195876121521, \"Time in s\": 16266.308937 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6865973957648297, \"MicroF1\": 0.6865973957648297, \"MacroF1\": 0.6786744939637405, \"Memory in Mb\": 21.092598915100098, \"Time in s\": 17165.821217 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6857259860254409, \"MicroF1\": 0.6857259860254409, \"MacroF1\": 0.6778492437957201, \"Memory in Mb\": 16.29904079437256, \"Time in s\": 18090.697656 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6837540807934807, \"MicroF1\": 0.6837540807934807, \"MacroF1\": 0.6766238977666043, \"Memory in Mb\": 13.538718223571776, \"Time in s\": 19041.492123 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814462278124469, \"MicroF1\": 0.6814462278124469, \"MacroF1\": 0.675074837604149, \"Memory in Mb\": 15.844508171081545, \"Time in s\": 20015.794843 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6790643717891048, \"MicroF1\": 0.6790643717891048, \"MacroF1\": 0.6733686277261395, \"Memory in Mb\": 15.962260246276855, \"Time in s\": 21014.495285 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6762443700196328, \"MicroF1\": 0.6762443700196328, \"MacroF1\": 0.6713719096586489, \"Memory in Mb\": 17.128825187683105, \"Time in s\": 22038.291411 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6738066785416338, \"MicroF1\": 0.6738066785416338, \"MacroF1\": 0.6696205967919768, \"Memory in Mb\": 16.462289810180664, \"Time in s\": 23087.153485000003 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6717686700288502, \"MicroF1\": 0.6717686700288502, \"MacroF1\": 0.6680705737277651, \"Memory in Mb\": 17.22057342529297, \"Time in s\": 24160.866409 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6708994253492025, \"MicroF1\": 0.6708994253492025, \"MacroF1\": 0.6677330044499646, \"Memory in Mb\": 17.752578735351562, \"Time in s\": 25258.281990000003 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6729939603106126, \"MicroF1\": 0.6729939603106126, \"MacroF1\": 0.6699611714455135, \"Memory in Mb\": 18.93515110015869, \"Time in s\": 26380.044308000004 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6767061245496655, \"MicroF1\": 0.6767061245496655, \"MacroF1\": 0.6733691077464542, \"Memory in Mb\": 20.549713134765625, \"Time in s\": 27525.907792000005 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 
0.6807237412101308, \"MicroF1\": 0.6807237412101308, \"MacroF1\": 0.6769109137483648, \"Memory in Mb\": 20.974443435668945, \"Time in s\": 28695.60352400001 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6845147671000453, \"MicroF1\": 0.6845147671000453, \"MacroF1\": 0.6800104952374638, \"Memory in Mb\": 22.97932243347168, \"Time in s\": 29888.26411600001 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6885182536768258, \"MicroF1\": 0.6885182536768258, \"MacroF1\": 0.6832561756017089, \"Memory in Mb\": 24.11430263519287, \"Time in s\": 31103.175398000007 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6915471883937195, \"MicroF1\": 0.6915471883937195, \"MacroF1\": 0.6864107325641782, \"Memory in Mb\": 18.141328811645508, \"Time in s\": 32334.061725000007 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.238800048828125, \"Time in s\": 2.971982 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9460122699386504, \"MicroF1\": 0.9460122699386504, \"MacroF1\": 0.8367492469040564, \"Memory in Mb\": 4.44326114654541, \"Time in s\": 9.847778 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411283728536386, \"MicroF1\": 0.9411283728536386, \"MacroF1\": 0.9276213812296338, \"Memory in Mb\": 6.153376579284668, \"Time in s\": 20.932147 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.950337216431637, \"MicroF1\": 0.950337216431637, \"MacroF1\": 0.9330502878949444, \"Memory in Mb\": 7.8991851806640625, \"Time in s\": 36.717851 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9494850416871016, \"MicroF1\": 0.9494850416871016, \"MacroF1\": 0.932928877406915, \"Memory in Mb\": 10.965654373168944, \"Time in s\": 57.922702 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9525950143032283, \"MicroF1\": 0.9525950143032283, \"MacroF1\": 0.9502305130509756, \"Memory in Mb\": 10.694184303283691, \"Time in s\": 83.450682 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9544658493870404, \"MicroF1\": 0.9544658493870404, \"MacroF1\": 0.943855127765724, \"Memory in Mb\": 15.53213119506836, \"Time in s\": 113.412194 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9515783021759118, \"MicroF1\": 0.9515783021759118, \"MacroF1\": 0.944582727256988, \"Memory in Mb\": 15.652314186096191, \"Time in s\": 149.290909 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9526014709888314, \"MicroF1\": 0.9526014709888314, \"MacroF1\": 
0.9497542235388344, \"Memory in Mb\": 16.11695098876953, \"Time in s\": 190.984607 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9499877420936504, \"MicroF1\": 0.9499877420936504, \"MacroF1\": 0.9391633661003512, \"Memory in Mb\": 16.578293800354004, \"Time in s\": 238.599211 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9474036104301314, \"MicroF1\": 0.9474036104301314, \"MacroF1\": 0.9496969875723204, \"Memory in Mb\": 13.190230369567873, \"Time in s\": 292.48392399999994 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9493360572012256, \"MicroF1\": 0.9493360572012256, \"MacroF1\": 0.9494027577495958, \"Memory in Mb\": 12.864276885986328, \"Time in s\": 353.18946299999993 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.951159720912691, \"MicroF1\": 0.951159720912691, \"MacroF1\": 0.9518992835106976, \"Memory in Mb\": 11.604743957519531, \"Time in s\": 419.932067 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.951146909472947, \"MicroF1\": 0.951146909472947, \"MacroF1\": 0.9505351682914018, \"Memory in Mb\": 13.577879905700684, \"Time in s\": 492.702395 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9478672985781992, \"MicroF1\": 0.9478672985781992, \"MacroF1\": 0.9429356622084736, \"Memory in Mb\": 16.35688304901123, \"Time in s\": 572.32886 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9480618967366324, \"MicroF1\": 0.9480618967366324, \"MacroF1\": 0.9478348775735732, \"Memory in Mb\": 10.670846939086914, \"Time in s\": 659.058815 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9495313626532084, \"MicroF1\": 0.9495313626532084, \"MacroF1\": 0.9511497142125284, \"Memory in Mb\": 10.614124298095703, \"Time in s\": 751.777349 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9500204276181398, \"MicroF1\": 0.9500204276181398, \"MacroF1\": 0.9502583235097112, \"Memory in Mb\": 12.824416160583496, \"Time in s\": 851.689186 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9505870210295446, \"MicroF1\": 0.9505870210295446, \"MacroF1\": 0.9508630550075082, \"Memory in Mb\": 11.99438190460205, \"Time in s\": 957.810381 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9487682314009068, \"MicroF1\": 0.9487682314009068, \"MacroF1\": 0.9466937008923912, \"Memory in Mb\": 16.34542465209961, \"Time in s\": 1069.487963 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9491070386366288, \"MicroF1\": 0.9491070386366288, \"MacroF1\": 0.9496258519963297, \"Memory in Mb\": 14.096193313598633, \"Time 
in s\": 1187.33597 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.95041782729805, \"MicroF1\": 0.95041782729805, \"MacroF1\": 0.95112303337496, \"Memory in Mb\": 8.487105369567871, \"Time in s\": 1310.539589 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9507620164126612, \"MicroF1\": 0.9507620164126612, \"MacroF1\": 0.9509680125568912, \"Memory in Mb\": 10.491826057434082, \"Time in s\": 1439.276187 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9505668471044836, \"MicroF1\": 0.9505668471044836, \"MacroF1\": 0.9508008066421794, \"Memory in Mb\": 12.578843116760254, \"Time in s\": 1574.391157 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9496029022453182, \"MicroF1\": 0.9496029022453182, \"MacroF1\": 0.9490825188137642, \"Memory in Mb\": 15.329971313476562, \"Time in s\": 1716.648577 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9462619025172057, \"MicroF1\": 0.9462619025172057, \"MacroF1\": 0.9448381382156612, \"Memory in Mb\": 14.149526596069336, \"Time in s\": 1865.207745 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.94734453018611, \"MicroF1\": 0.94734453018611, \"MacroF1\": 0.9480489849360164, \"Memory in Mb\": 15.43016529083252, \"Time in s\": 2018.851918 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9476494791210716, \"MicroF1\": 0.9476494791210716, \"MacroF1\": 0.947763256048792, \"Memory in Mb\": 19.33940696716309, \"Time in s\": 2178.069416 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9466655396838812, \"MicroF1\": 0.9466655396838812, \"MacroF1\": 0.9465646854570324, \"Memory in Mb\": 20.836685180664062, \"Time in s\": 2343.241513 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9473813220034316, \"MicroF1\": 0.9473813220034316, \"MacroF1\": 0.9477056335712672, \"Memory in Mb\": 22.594761848449707, \"Time in s\": 2516.191517 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9481299913022851, \"MicroF1\": 0.9481299913022851, \"MacroF1\": 0.9484695727303012, \"Memory in Mb\": 17.12001132965088, \"Time in s\": 2696.111746 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9465338950593642, \"MicroF1\": 0.9465338950593642, \"MacroF1\": 0.9461537407653536, \"Memory in Mb\": 16.317899703979492, \"Time in s\": 2881.285632 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947559979202258, \"MicroF1\": 0.947559979202258, \"MacroF1\": 0.9479124389900307, \"Memory in Mb\": 17.175325393676758, \"Time in s\": 3071.301855 }, { \"step\": 13872, \"track\": \"Multiclass 
classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9480931439694328, \"MicroF1\": 0.9480931439694328, \"MacroF1\": 0.9483129032895908, \"Memory in Mb\": 20.13454818725586, \"Time in s\": 3267.097668 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947265214650886, \"MicroF1\": 0.9472652146508858, \"MacroF1\": 0.9472495958535088, \"Memory in Mb\": 23.271190643310547, \"Time in s\": 3470.238619 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9470960713556206, \"MicroF1\": 0.9470960713556206, \"MacroF1\": 0.9472715831304288, \"Memory in Mb\": 24.70554256439209, \"Time in s\": 3681.555614 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9477310367671414, \"MicroF1\": 0.9477310367671414, \"MacroF1\": 0.948023523282346, \"Memory in Mb\": 12.28943920135498, \"Time in s\": 3901.326497 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9481390698574468, \"MicroF1\": 0.9481390698574468, \"MacroF1\": 0.9483821660022894, \"Memory in Mb\": 8.167351722717285, \"Time in s\": 4127.852482 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9489661240651122, \"MicroF1\": 0.9489661240651122, \"MacroF1\": 0.949259317367439, \"Memory in Mb\": 10.055158615112305, \"Time in s\": 4359.152379 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9489552055885776, \"MicroF1\": 0.9489552055885776, \"MacroF1\": 0.949102505659295, \"Memory in Mb\": 8.927614212036133, \"Time in s\": 4595.973694 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.948645901835356, \"MicroF1\": 0.948645901835356, \"MacroF1\": 0.9487532899546076, \"Memory in Mb\": 9.772565841674805, \"Time in s\": 4838.16456 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9491683688357164, \"MicroF1\": 0.9491683688357164, \"MacroF1\": 0.9493664270655614, \"Memory in Mb\": 8.885259628295898, \"Time in s\": 5086.602422 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9497235364532862, \"MicroF1\": 0.9497235364532862, \"MacroF1\": 0.9498997400720456, \"Memory in Mb\": 7.432655334472656, \"Time in s\": 5340.103049 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9492507381204388, \"MicroF1\": 0.9492507381204388, \"MacroF1\": 0.94932994822919, \"Memory in Mb\": 6.564939498901367, \"Time in s\": 5598.267976 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9498883381447792, \"MicroF1\": 0.949888338144779, \"MacroF1\": 0.9500369712738612, \"Memory in Mb\": 9.445448875427246, \"Time in s\": 5861.742373 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.950125219800714, \"MicroF1\": 0.950125219800714, \"MacroF1\": 0.950244810756275, \"Memory in Mb\": 8.985580444335938, \"Time in s\": 6130.741927 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9502477183833116, \"MicroF1\": 0.9502477183833116, \"MacroF1\": 0.950357710715448, \"Memory in Mb\": 10.539984703063965, \"Time in s\": 6405.708121 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9504672419956084, \"MicroF1\": 0.9504672419956084, \"MacroF1\": 0.9505675543483478, \"Memory in Mb\": 12.433100700378418, \"Time in s\": 6686.90918 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9504777149717372, \"MicroF1\": 0.9504777149717372, \"MacroF1\": 0.95056596570352, \"Memory in Mb\": 12.245397567749023, \"Time in s\": 6973.48828 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.950389724986519, \"MicroF1\": 0.950389724986519, \"MacroF1\": 0.9504675266923704, \"Memory in Mb\": 10.420146942138672, \"Time in s\": 7265.024106 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4, \"MicroF1\": 0.4000000000000001, \"MacroF1\": 0.3289160825620571, \"Memory in Mb\": 1.8703498840332031, \"Time in s\": 0.761019 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5494505494505495, \"MicroF1\": 0.5494505494505495, \"MacroF1\": 0.5607526488856412, \"Memory in Mb\": 2.0432376861572266, \"Time in s\": 2.058459 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5620437956204379, \"MicroF1\": 0.5620437956204379, \"MacroF1\": 0.5814352652080846, \"Memory in Mb\": 2.2601184844970703, \"Time in s\": 3.877596 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6349823285289026, \"Memory in Mb\": 2.5773630142211914, \"Time in s\": 5.988367 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6550218340611353, \"MicroF1\": 0.6550218340611353, \"MacroF1\": 0.6697464616246889, \"Memory in Mb\": 2.673569679260254, \"Time in s\": 8.36838 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.68, \"MicroF1\": 0.68, \"MacroF1\": 0.6977451412884614, \"Memory in Mb\": 2.705929756164551, \"Time in s\": 11.015152 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7040498442367601, \"MicroF1\": 0.7040498442367601, \"MacroF1\": 0.708655608864303, \"Memory in Mb\": 2.747677803039551, \"Time in s\": 13.922748 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7302452316076294, \"MicroF1\": 0.7302452316076294, \"MacroF1\": 0.731555248839775, \"Memory in Mb\": 2.91958236694336, \"Time in s\": 17.085856 }, { \"step\": 414, 
\"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7481840193704601, \"MicroF1\": 0.7481840193704601, \"MacroF1\": 0.7498869297449521, \"Memory in Mb\": 3.2087087631225586, \"Time in s\": 20.515594 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7429193899782135, \"MicroF1\": 0.7429193899782135, \"MacroF1\": 0.7431113090395209, \"Memory in Mb\": 2.874252319335937, \"Time in s\": 24.226579 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7465346534653465, \"MicroF1\": 0.7465346534653465, \"MacroF1\": 0.7453691625646783, \"Memory in Mb\": 3.051929473876953, \"Time in s\": 28.205653 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7531760435571688, \"MicroF1\": 0.7531760435571688, \"MacroF1\": 0.7537204076398122, \"Memory in Mb\": 3.133829116821289, \"Time in s\": 32.454626000000005 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7587939698492462, \"MicroF1\": 0.7587939698492462, \"MacroF1\": 0.7612399908296416, \"Memory in Mb\": 3.14900016784668, \"Time in s\": 36.970245000000006 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7589424572317263, \"MicroF1\": 0.7589424572317262, \"MacroF1\": 0.7628637146980985, \"Memory in Mb\": 3.4707136154174805, \"Time in s\": 41.75427400000001 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7619738751814223, \"MicroF1\": 0.7619738751814223, \"MacroF1\": 0.76530464273308, \"Memory in Mb\": 3.455944061279297, \"Time in s\": 46.80492900000001 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7687074829931972, \"MicroF1\": 0.7687074829931972, \"MacroF1\": 0.7727990926768868, \"Memory in Mb\": 3.680045127868652, \"Time in s\": 52.11999800000001 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7733674775928298, \"MicroF1\": 0.7733674775928298, \"MacroF1\": 0.7767963295410655, \"Memory in Mb\": 3.8801565170288086, \"Time in s\": 57.706528000000006 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7738814993954051, \"MicroF1\": 0.7738814993954051, \"MacroF1\": 0.7787678467755003, \"Memory in Mb\": 3.867655754089356, \"Time in s\": 63.567513000000005 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7812142038946163, \"MicroF1\": 0.7812142038946163, \"MacroF1\": 0.7848289172220594, \"Memory in Mb\": 3.691183090209961, \"Time in s\": 69.704029 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7878128400435256, \"MicroF1\": 0.7878128400435256, \"MacroF1\": 0.7905661589338376, \"Memory in Mb\": 3.770216941833496, \"Time in s\": 76.10799700000001 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 
0.7917098445595855, \"MicroF1\": 0.7917098445595855, \"MacroF1\": 0.7936972979049142, \"Memory in Mb\": 3.8226003646850586, \"Time in s\": 82.78424000000001 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7952522255192879, \"MicroF1\": 0.7952522255192878, \"MacroF1\": 0.796484514345152, \"Memory in Mb\": 4.098711967468262, \"Time in s\": 89.73293500000001 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8032166508987701, \"MicroF1\": 0.8032166508987703, \"MacroF1\": 0.8038465931831994, \"Memory in Mb\": 4.173297882080078, \"Time in s\": 96.952593 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8041704442429737, \"MicroF1\": 0.8041704442429737, \"MacroF1\": 0.8051724065917674, \"Memory in Mb\": 4.39574146270752, \"Time in s\": 104.442345 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8102697998259356, \"MicroF1\": 0.8102697998259357, \"MacroF1\": 0.8109646011887589, \"Memory in Mb\": 4.552497863769531, \"Time in s\": 112.202361 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8142259414225942, \"MicroF1\": 0.8142259414225941, \"MacroF1\": 0.8149917549940485, \"Memory in Mb\": 4.571473121643066, \"Time in s\": 120.234064 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8186946011281225, \"MicroF1\": 0.8186946011281225, \"MacroF1\": 0.8196592056494876, \"Memory in Mb\": 4.626148223876953, \"Time in s\": 128.53886899999998 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8212898212898213, \"MicroF1\": 0.8212898212898213, \"MacroF1\": 0.822176577441966, \"Memory in Mb\": 5.001523017883301, \"Time in s\": 137.11683999999997 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8229557389347337, \"MicroF1\": 0.8229557389347337, \"MacroF1\": 0.8237863794336502, \"Memory in Mb\": 5.142135620117188, \"Time in s\": 145.97222099999996 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8245105148658448, \"MicroF1\": 0.8245105148658448, \"MacroF1\": 0.8256018780761997, \"Memory in Mb\": 5.339564323425293, \"Time in s\": 155.11012299999996 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8287719298245614, \"MicroF1\": 0.8287719298245614, \"MacroF1\": 0.8290084946618356, \"Memory in Mb\": 5.337568283081055, \"Time in s\": 164.53666099999995 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8334466349422162, \"MicroF1\": 0.8334466349422162, \"MacroF1\": 0.8325983603187124, \"Memory in Mb\": 5.299435615539551, \"Time in s\": 174.25653999999994 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8358602504943968, \"MicroF1\": 0.8358602504943968, \"MacroF1\": 0.8344617749849152, \"Memory in 
Mb\": 5.345264434814453, \"Time in s\": 184.26794299999997 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8387715930902111, \"MicroF1\": 0.8387715930902111, \"MacroF1\": 0.837784263767798, \"Memory in Mb\": 4.9267168045043945, \"Time in s\": 194.57830699999997 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.839030453697949, \"MicroF1\": 0.839030453697949, \"MacroF1\": 0.838065870841574, \"Memory in Mb\": 4.685762405395508, \"Time in s\": 205.19165999999996 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8416918429003021, \"MicroF1\": 0.8416918429003022, \"MacroF1\": 0.8408915736149335, \"Memory in Mb\": 4.737677574157715, \"Time in s\": 216.09684699999997 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8418577307466196, \"MicroF1\": 0.8418577307466195, \"MacroF1\": 0.8423710518418951, \"Memory in Mb\": 4.180461883544922, \"Time in s\": 227.29100499999996 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8431597023468803, \"MicroF1\": 0.8431597023468804, \"MacroF1\": 0.8432643493367186, \"Memory in Mb\": 4.331151962280273, \"Time in s\": 238.7660419999999 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8455103179029559, \"MicroF1\": 0.8455103179029559, \"MacroF1\": 0.8449435902582664, \"Memory in Mb\": 4.424459457397461, \"Time in s\": 250.52640599999992 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8466557911908646, \"MicroF1\": 0.8466557911908648, \"MacroF1\": 0.8462222022075542, \"Memory in Mb\": 4.3217973709106445, \"Time in s\": 262.5845499999999 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8477453580901857, \"MicroF1\": 0.8477453580901856, \"MacroF1\": 0.84772474367672, \"Memory in Mb\": 4.364754676818848, \"Time in s\": 274.94178099999993 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8487830139823925, \"MicroF1\": 0.8487830139823925, \"MacroF1\": 0.8484572581714136, \"Memory in Mb\": 4.410244941711426, \"Time in s\": 287.5965719999999 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.849772382397572, \"MicroF1\": 0.849772382397572, \"MacroF1\": 0.8495372758679525, \"Memory in Mb\": 4.436578750610352, \"Time in s\": 300.5458569999999 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8507167572911517, \"MicroF1\": 0.8507167572911517, \"MacroF1\": 0.8496927624131454, \"Memory in Mb\": 4.292850494384766, \"Time in s\": 313.7872069999999 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8511358144030933, \"MicroF1\": 0.8511358144030933, \"MacroF1\": 0.8503705992191455, \"Memory in Mb\": 4.422323226928711, \"Time in s\": 327.3192999999999 }, { \"step\": 
2116, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8515366430260047, \"MicroF1\": 0.8515366430260047, \"MacroF1\": 0.850305284692234, \"Memory in Mb\": 4.440757751464844, \"Time in s\": 341.1384159999999 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8505321610365572, \"MicroF1\": 0.850532161036557, \"MacroF1\": 0.84908675540822, \"Memory in Mb\": 4.611151695251465, \"Time in s\": 355.24865599999987 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.851835070231083, \"MicroF1\": 0.851835070231083, \"MacroF1\": 0.8501011345319502, \"Memory in Mb\": 4.809813499450684, \"Time in s\": 369.64566299999984 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8490901020861075, \"MicroF1\": 0.8490901020861075, \"MacroF1\": 0.847799327251759, \"Memory in Mb\": 4.949430465698242, \"Time in s\": 384.3275549999999 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8490648107872988, \"MicroF1\": 0.8490648107872988, \"MacroF1\": 0.8479218608351832, \"Memory in Mb\": 5.295671463012695, \"Time in s\": 399.2890019999999 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6454976303317536, \"MicroF1\": 0.6454976303317536, \"MacroF1\": 0.5867724425586438, \"Memory in Mb\": 7.441350936889648, \"Time in s\": 8.583896 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6826148744670772, \"MicroF1\": 0.6826148744670772, \"MacroF1\": 0.6053874539212664, \"Memory in Mb\": 11.234929084777832, \"Time in s\": 25.378115 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6896116198294916, \"MicroF1\": 0.6896116198294916, \"MacroF1\": 0.6083758872885286, \"Memory in Mb\": 13.40891933441162, \"Time in s\": 50.237849 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6954771489462468, \"MicroF1\": 0.6954771489462468, \"MacroF1\": 0.6085129807470798, \"Memory in Mb\": 18.096717834472656, \"Time in s\": 83.102519 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7014586095851487, \"MicroF1\": 0.7014586095851487, \"MacroF1\": 0.6122692721162352, \"Memory in Mb\": 21.446727752685547, \"Time in s\": 123.914838 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7021310181531176, \"MicroF1\": 0.7021310181531176, \"MacroF1\": 0.6116513676781078, \"Memory in Mb\": 27.182113647460938, \"Time in s\": 172.776504 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7054525774590719, \"MicroF1\": 0.7054525774590719, \"MacroF1\": 0.6129808753663538, \"Memory in Mb\": 26.876797676086422, \"Time in s\": 229.478895 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.70853557476027, \"MicroF1\": 0.70853557476027, 
\"MacroF1\": 0.6147213044531655, \"Memory in Mb\": 31.851184844970703, \"Time in s\": 293.916345 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7137745974955277, \"MicroF1\": 0.7137745974955277, \"MacroF1\": 0.6175531178778296, \"Memory in Mb\": 22.848219871521, \"Time in s\": 366.230765 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7174921867601098, \"MicroF1\": 0.7174921867601098, \"MacroF1\": 0.619713417782018, \"Memory in Mb\": 19.317788124084476, \"Time in s\": 446.248603 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.717606543263022, \"MicroF1\": 0.717606543263022, \"MacroF1\": 0.618960125586482, \"Memory in Mb\": 19.568995475769043, \"Time in s\": 533.7141509999999 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7184121221687317, \"MicroF1\": 0.7184121221687317, \"MacroF1\": 0.6302774396409263, \"Memory in Mb\": 23.59817409515381, \"Time in s\": 628.5255009999998 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373060391928317, \"MicroF1\": 0.7373060391928317, \"MacroF1\": 0.7337291247132964, \"Memory in Mb\": 6.318717002868652, \"Time in s\": 729.5138609999999 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.744774403030508, \"MicroF1\": 0.7447744030305079, \"MacroF1\": 0.7439388578060665, \"Memory in Mb\": 4.747281074523926, \"Time in s\": 836.7314339999999 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7374834269840268, \"MicroF1\": 0.7374834269840268, \"MacroF1\": 0.7388535634976899, \"Memory in Mb\": 8.635688781738281, \"Time in s\": 951.47119 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7324652263983427, \"MicroF1\": 0.7324652263983427, \"MacroF1\": 0.736003592775451, \"Memory in Mb\": 13.742908477783203, \"Time in s\": 1073.59215 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7253077822962509, \"MicroF1\": 0.7253077822962509, \"MacroF1\": 0.7305072565778182, \"Memory in Mb\": 20.00777053833008, \"Time in s\": 1203.171307 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7387804493081497, \"MicroF1\": 0.7387804493081497, \"MacroF1\": 0.7395324944779035, \"Memory in Mb\": 6.087196350097656, \"Time in s\": 1339.825862 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7439066939141704, \"MicroF1\": 0.7439066939141704, \"MacroF1\": 0.7399287274487314, \"Memory in Mb\": 7.841000556945801, \"Time in s\": 1483.477757 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7456792461764288, \"MicroF1\": 0.7456792461764288, \"MacroF1\": 0.738136498436516, \"Memory in Mb\": 11.804688453674316, \"Time in s\": 1634.941518 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.7464712514092446, \"MicroF1\": 0.7464712514092445, \"MacroF1\": 0.7355899333520025, \"Memory in Mb\": 17.346091270446777, \"Time in s\": 1793.971452 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7486548146872714, \"MicroF1\": 0.7486548146872714, \"MacroF1\": 0.7347795423630049, \"Memory in Mb\": 20.6541748046875, \"Time in s\": 1960.617459 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7502367521719439, \"MicroF1\": 0.7502367521719437, \"MacroF1\": 0.7334324471857778, \"Memory in Mb\": 21.727876663208008, \"Time in s\": 2134.847723 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7523576530008287, \"MicroF1\": 0.7523576530008288, \"MacroF1\": 0.7330792890892175, \"Memory in Mb\": 28.11653995513916, \"Time in s\": 2316.991759 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7535891511042085, \"MicroF1\": 0.7535891511042085, \"MacroF1\": 0.731955812013067, \"Memory in Mb\": 28.993709564208984, \"Time in s\": 2507.189995 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7540338736113641, \"MicroF1\": 0.7540338736113641, \"MacroF1\": 0.7298780765329144, \"Memory in Mb\": 35.52100467681885, \"Time in s\": 2705.597193 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7522710532776823, \"MicroF1\": 0.7522710532776823, \"MacroF1\": 0.7301216768723076, \"Memory in Mb\": 19.597237586975098, \"Time in s\": 2912.282888 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7502621165488551, \"MicroF1\": 0.7502621165488552, \"MacroF1\": 0.733319854895679, \"Memory in Mb\": 15.064599990844728, \"Time in s\": 3126.360865 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7500244913953564, \"MicroF1\": 0.7500244913953564, \"MacroF1\": 0.7381499467352403, \"Memory in Mb\": 21.998522758483887, \"Time in s\": 3348.070871 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7493292086240096, \"MicroF1\": 0.7493292086240096, \"MacroF1\": 0.7414716120706107, \"Memory in Mb\": 28.99252319335937, \"Time in s\": 3577.2433 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7494424927447686, \"MicroF1\": 0.7494424927447686, \"MacroF1\": 0.7447602446394828, \"Memory in Mb\": 36.39131259918213, \"Time in s\": 3813.876585 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7448137077920156, \"MicroF1\": 0.7448137077920156, \"MacroF1\": 0.7415559043607837, \"Memory in Mb\": 7.406244277954102, \"Time in s\": 4059.132247 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7397193445633771, \"MicroF1\": 0.739719344563377, \"MacroF1\": 0.7363475181006618, \"Memory in Mb\": 8.795232772827148, \"Time in s\": 4312.474973 }, { \"step\": 
35904, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7365679748210456, \"MicroF1\": 0.7365679748210455, \"MacroF1\": 0.7329849736783064, \"Memory in Mb\": 11.10138702392578, \"Time in s\": 4573.597154 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7330014340214832, \"MicroF1\": 0.7330014340214832, \"MacroF1\": 0.7293557861681861, \"Memory in Mb\": 15.830912590026855, \"Time in s\": 4842.477951 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7302643693278968, \"MicroF1\": 0.7302643693278967, \"MacroF1\": 0.7264691718738406, \"Memory in Mb\": 19.795815467834476, \"Time in s\": 5119.095735 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7309513449873307, \"MicroF1\": 0.7309513449873307, \"MacroF1\": 0.7270525503986339, \"Memory in Mb\": 9.05908489227295, \"Time in s\": 5403.358583 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.729284521643781, \"MicroF1\": 0.729284521643781, \"MacroF1\": 0.7256952486493923, \"Memory in Mb\": 11.242535591125488, \"Time in s\": 5695.769582 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7294029089672923, \"MicroF1\": 0.7294029089672922, \"MacroF1\": 0.7260996194485368, \"Memory in Mb\": 9.506610870361328, \"Time in s\": 5995.177374 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7298941736310045, \"MicroF1\": 0.7298941736310045, \"MacroF1\": 0.7269475794208268, \"Memory in Mb\": 15.258689880371094, \"Time in s\": 6301.374087 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7306848365862109, \"MicroF1\": 0.7306848365862109, \"MacroF1\": 0.7280100891072271, \"Memory in Mb\": 20.2097225189209, \"Time in s\": 6614.284978 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7312574688282113, \"MicroF1\": 0.7312574688282113, \"MacroF1\": 0.7287466644577517, \"Memory in Mb\": 26.378506660461422, \"Time in s\": 6934.278340999999 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7317814433897857, \"MicroF1\": 0.7317814433897857, \"MacroF1\": 0.7291491859846939, \"Memory in Mb\": 32.061384201049805, \"Time in s\": 7261.498196999999 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.732776617954071, \"MicroF1\": 0.732776617954071, \"MacroF1\": 0.7299865007540453, \"Memory in Mb\": 32.25613784790039, \"Time in s\": 7595.964266999999 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7334329426124288, \"MicroF1\": 0.7334329426124286, \"MacroF1\": 0.7309449816547512, \"Memory in Mb\": 16.162960052490234, \"Time in s\": 7937.466337999999 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373751930005147, \"MicroF1\": 0.7373751930005147, \"MacroF1\": 
0.7352697035426822, \"Memory in Mb\": 12.93554973602295, \"Time in s\": 8285.412244 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.741210130765046, \"MicroF1\": 0.741210130765046, \"MacroF1\": 0.739269872700679, \"Memory in Mb\": 15.798639297485352, \"Time in s\": 8639.863329999998 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7446682581332491, \"MicroF1\": 0.7446682581332491, \"MacroF1\": 0.7426657147430288, \"Memory in Mb\": 14.252553939819336, \"Time in s\": 9000.792304999999 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7485650232881742, \"MicroF1\": 0.7485650232881743, \"MacroF1\": 0.7463959215624629, \"Memory in Mb\": 16.087495803833008, \"Time in s\": 9368.035475 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.752154396863577, \"MicroF1\": 0.752154396863577, \"MacroF1\": 0.7502511872752614, \"Memory in Mb\": 11.339015007019045, \"Time in s\": 9741.135739 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 1.0347824096679688, \"Time in s\": 1.562947 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840490797546012, \"MicroF1\": 0.9840490797546012, \"MacroF1\": 0.9559273479637392, \"Memory in Mb\": 2.137660026550293, \"Time in s\": 5.08284 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.983646770237122, \"MicroF1\": 0.983646770237122, \"MacroF1\": 0.9660207101584454, \"Memory in Mb\": 3.2939910888671875, \"Time in s\": 10.794145 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803801348865726, \"MicroF1\": 0.9803801348865726, \"MacroF1\": 0.9452685517164728, \"Memory in Mb\": 4.760180473327637, \"Time in s\": 18.719227 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.973516429622364, \"MicroF1\": 0.973516429622364, \"MacroF1\": 0.9361195161551138, \"Memory in Mb\": 6.6425981521606445, \"Time in s\": 28.938428 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.973028197793216, \"MicroF1\": 0.973028197793216, \"MacroF1\": 0.9615988180290456, \"Memory in Mb\": 5.552071571350098, \"Time in s\": 41.439403 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9747810858143608, \"MicroF1\": 0.9747810858143608, \"MacroF1\": 0.9713591464752812, \"Memory in Mb\": 6.8436784744262695, \"Time in s\": 56.378536 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.974869751762182, \"MicroF1\": 0.974869751762182, \"MacroF1\": 0.9692034094625394, \"Memory in Mb\": 6.567030906677246, \"Time in s\": 73.934123 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.9743938981204032, \"MicroF1\": 0.9743938981204032, \"MacroF1\": 0.9689232613591288, \"Memory in Mb\": 8.48116397857666, \"Time in s\": 94.245901 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722971316499142, \"MicroF1\": 0.9722971316499142, \"MacroF1\": 0.96426610548244, \"Memory in Mb\": 5.934853553771973, \"Time in s\": 117.581237 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9732560731000668, \"MicroF1\": 0.9732560731000668, \"MacroF1\": 0.9722719909296184, \"Memory in Mb\": 3.644045829772949, \"Time in s\": 143.695153 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748723186925434, \"MicroF1\": 0.9748723186925434, \"MacroF1\": 0.9754037061196345, \"Memory in Mb\": 4.787886619567871, \"Time in s\": 172.738198 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9741655666603808, \"MicroF1\": 0.9741655666603808, \"MacroF1\": 0.9716360242916738, \"Memory in Mb\": 5.742301940917969, \"Time in s\": 204.85114700000003 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9746104009805638, \"MicroF1\": 0.9746104009805638, \"MacroF1\": 0.9740216295290516, \"Memory in Mb\": 6.782421112060547, \"Time in s\": 240.11050800000004 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9738519365909464, \"MicroF1\": 0.9738519365909464, \"MacroF1\": 0.9722333406974256, \"Memory in Mb\": 8.176769256591797, \"Time in s\": 278.69027800000003 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9742607629845258, \"MicroF1\": 0.9742607629845258, \"MacroF1\": 0.9741504405159308, \"Memory in Mb\": 4.433716773986816, \"Time in s\": 320.74209900000005 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751982696467196, \"MicroF1\": 0.9751982696467196, \"MacroF1\": 0.9755523782693606, \"Memory in Mb\": 5.133135795593262, \"Time in s\": 366.1265460000001 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9757592264741932, \"MicroF1\": 0.9757592264741932, \"MacroF1\": 0.9758485662267348, \"Memory in Mb\": 5.760107040405273, \"Time in s\": 415.04014400000005 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760030963746612, \"MicroF1\": 0.9760030963746612, \"MacroF1\": 0.9758957983961688, \"Memory in Mb\": 6.842521667480469, \"Time in s\": 467.5877 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9758548841769824, \"MicroF1\": 0.9758548841769824, \"MacroF1\": 0.9755087152005796, \"Memory in Mb\": 8.403876304626465, \"Time in s\": 523.9395460000001 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751371541963348, \"MicroF1\": 0.9751371541963348, \"MacroF1\": 0.9744422302091884, \"Memory in Mb\": 8.9804105758667, \"Time in s\": 
584.299164 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.975598885793872, \"MicroF1\": 0.975598885793872, \"MacroF1\": 0.9757626053423432, \"Memory in Mb\": 8.348807334899902, \"Time in s\": 648.764254 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97527443248428, \"MicroF1\": 0.97527443248428, \"MacroF1\": 0.9749874884381716, \"Memory in Mb\": 8.780474662780762, \"Time in s\": 717.435188 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751812889388214, \"MicroF1\": 0.9751812889388214, \"MacroF1\": 0.9751287694103772, \"Memory in Mb\": 7.27089786529541, \"Time in s\": 790.366072 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748994999509756, \"MicroF1\": 0.9748994999509756, \"MacroF1\": 0.9747198913701116, \"Memory in Mb\": 8.182950019836426, \"Time in s\": 867.658962 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9744508343546714, \"MicroF1\": 0.9744508343546714, \"MacroF1\": 0.9742218409220016, \"Memory in Mb\": 8.212386131286621, \"Time in s\": 949.401841 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9747616886064456, \"MicroF1\": 0.9747616886064456, \"MacroF1\": 0.9748981365239816, \"Memory in Mb\": 7.736974716186523, \"Time in s\": 1035.676852 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748752516851964, \"MicroF1\": 0.9748752516851964, \"MacroF1\": 0.9749367981815978, \"Memory in Mb\": 8.583486557006836, \"Time in s\": 1126.650079 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9749809821654974, \"MicroF1\": 0.9749809821654974, \"MacroF1\": 0.9750463661723392, \"Memory in Mb\": 8.887914657592773, \"Time in s\": 1222.478726 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9755698995015932, \"MicroF1\": 0.9755698995015932, \"MacroF1\": 0.9757989853757532, \"Memory in Mb\": 9.402573585510254, \"Time in s\": 1323.224256 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760417490313908, \"MicroF1\": 0.9760417490313908, \"MacroF1\": 0.9762258400907322, \"Memory in Mb\": 9.833843231201172, \"Time in s\": 1429.020384 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760245116813482, \"MicroF1\": 0.9760245116813482, \"MacroF1\": 0.9760626338918788, \"Memory in Mb\": 9.939188957214355, \"Time in s\": 1539.962955 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762311520463492, \"MicroF1\": 0.9762311520463492, \"MacroF1\": 0.976330045562598, \"Memory in Mb\": 10.46349811553955, \"Time in s\": 1656.191893 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763535433638526, \"MicroF1\": 0.9763535433638526, 
\"MacroF1\": 0.9764287224231292, \"Memory in Mb\": 11.428705215454102, \"Time in s\": 1777.90617 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9758386441627565, \"MicroF1\": 0.9758386441627565, \"MacroF1\": 0.9757700210755772, \"Memory in Mb\": 10.687178611755373, \"Time in s\": 1905.20097 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.976101314087288, \"MicroF1\": 0.976101314087288, \"MacroF1\": 0.9761996431080104, \"Memory in Mb\": 10.750173568725586, \"Time in s\": 2038.11614 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9764822789002982, \"MicroF1\": 0.9764822789002982, \"MacroF1\": 0.9765941858257003, \"Memory in Mb\": 10.87511920928955, \"Time in s\": 2176.860193 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762626588402245, \"MicroF1\": 0.9762626588402245, \"MacroF1\": 0.9762697293829714, \"Memory in Mb\": 11.144217491149902, \"Time in s\": 2321.518715 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.976305700458802, \"MicroF1\": 0.976305700458802, \"MacroF1\": 0.9763523962033862, \"Memory in Mb\": 10.881969451904297, \"Time in s\": 2472.196158 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9761627550707764, \"MicroF1\": 0.9761627550707764, \"MacroF1\": 0.9761821898526978, \"Memory in Mb\": 10.72462272644043, \"Time in s\": 2629.0181850000004 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760267830453756, \"MicroF1\": 0.9760267830453756, \"MacroF1\": 0.9760462981867312, \"Memory in Mb\": 10.397873878479004, \"Time in s\": 2792.0314960000005 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763641669098336, \"MicroF1\": 0.9763641669098336, \"MacroF1\": 0.976427628373518, \"Memory in Mb\": 11.5784912109375, \"Time in s\": 2961.4220420000006 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762868380550648, \"MicroF1\": 0.9762868380550648, \"MacroF1\": 0.9763077393136288, \"Memory in Mb\": 10.780138969421388, \"Time in s\": 3137.2839260000005 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763801459528716, \"MicroF1\": 0.9763801459528716, \"MacroF1\": 0.9764101118400772, \"Memory in Mb\": 11.24526309967041, \"Time in s\": 3319.622350000001 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97663271420012, \"MicroF1\": 0.97663271420012, \"MacroF1\": 0.976666198082788, \"Memory in Mb\": 11.450252532958984, \"Time in s\": 3508.4984120000004 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9769808706772526, \"MicroF1\": 0.9769808706772526, \"MacroF1\": 0.9770112706505792, \"Memory in Mb\": 12.824438095092772, \"Time in s\": 3704.110850000001 }, { \"step\": 19176, \"track\": 
\"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9767926988265972, \"MicroF1\": 0.9767926988265972, \"MacroF1\": 0.976797459665624, \"Memory in Mb\": 13.463789939880373, \"Time in s\": 3906.654607 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9766123678700914, \"MicroF1\": 0.9766123678700914, \"MacroF1\": 0.97661532368473, \"Memory in Mb\": 12.60595417022705, \"Time in s\": 4116.138220000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9765894652593669, \"MicroF1\": 0.9765894652593669, \"MacroF1\": 0.976591825772772, \"Memory in Mb\": 11.445868492126465, \"Time in s\": 4332.655771000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9765184567870974, \"MicroF1\": 0.9765184567870974, \"MacroF1\": 0.9765167109502484, \"Memory in Mb\": 12.220311164855955, \"Time in s\": 4556.333966000001 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4888888888888889, \"MicroF1\": 0.4888888888888889, \"MacroF1\": 0.4138888888888889, \"Memory in Mb\": 0.8855724334716797, \"Time in s\": 0.380739 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6263736263736264, \"MicroF1\": 0.6263736263736264, \"MacroF1\": 0.6295417331131617, \"Memory in Mb\": 0.9400959014892578, \"Time in s\": 0.906366 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6788321167883211, \"MicroF1\": 0.6788321167883211, \"MacroF1\": 0.6955125455614023, \"Memory in Mb\": 0.9512205123901368, \"Time in s\": 1.596335 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7158469945355191, \"MicroF1\": 0.7158469945355191, \"MacroF1\": 0.7293605295181818, \"Memory in Mb\": 0.9506902694702148, \"Time in s\": 2.451892 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.74235807860262, \"MicroF1\": 0.74235807860262, \"MacroF1\": 0.7560849066334576, \"Memory in Mb\": 0.9507265090942384, \"Time in s\": 3.478975 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7490909090909091, \"MicroF1\": 0.7490909090909091, \"MacroF1\": 0.7654899494294127, \"Memory in Mb\": 0.9522123336791992, \"Time in s\": 4.6524790000000005 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7632398753894081, \"MicroF1\": 0.7632398753894081, \"MacroF1\": 0.7699967547900484, \"Memory in Mb\": 0.9522132873535156, \"Time in s\": 5.915859 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.782016348773842, \"MicroF1\": 0.782016348773842, \"MacroF1\": 0.7847454642968661, \"Memory in Mb\": 0.9517135620117188, \"Time in s\": 7.268692 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7869249394673123, \"MicroF1\": 0.7869249394673122, \"MacroF1\": 
0.7891209865588749, \"Memory in Mb\": 0.952162742614746, \"Time in s\": 8.714221 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7821350762527233, \"MicroF1\": 0.7821350762527233, \"MacroF1\": 0.7829889615631377, \"Memory in Mb\": 0.9522056579589844, \"Time in s\": 10.249346 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7861386138613862, \"MicroF1\": 0.7861386138613862, \"MacroF1\": 0.7872755051739567, \"Memory in Mb\": 0.9517154693603516, \"Time in s\": 11.874447 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7858439201451906, \"MicroF1\": 0.7858439201451906, \"MacroF1\": 0.7876565639439724, \"Memory in Mb\": 0.9515762329101562, \"Time in s\": 13.588949 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7872696817420436, \"MicroF1\": 0.7872696817420435, \"MacroF1\": 0.7897468061485311, \"Memory in Mb\": 0.9521427154541016, \"Time in s\": 15.393404 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7822706065318819, \"MicroF1\": 0.7822706065318819, \"MacroF1\": 0.7858452362125997, \"Memory in Mb\": 0.9521217346191406, \"Time in s\": 17.2908 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7851959361393324, \"MicroF1\": 0.7851959361393324, \"MacroF1\": 0.788215888108031, \"Memory in Mb\": 0.9515953063964844, \"Time in s\": 19.280843 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7836734693877551, \"MicroF1\": 0.783673469387755, \"MacroF1\": 0.7873581098337732, \"Memory in Mb\": 0.9521245956420898, \"Time in s\": 21.362069 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7861715749039693, \"MicroF1\": 0.7861715749039692, \"MacroF1\": 0.7892834149474556, \"Memory in Mb\": 0.9521360397338868, \"Time in s\": 23.534270000000003 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7847642079806529, \"MicroF1\": 0.7847642079806529, \"MacroF1\": 0.7891292080670234, \"Memory in Mb\": 0.951629638671875, \"Time in s\": 25.799776 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7892325315005727, \"MicroF1\": 0.7892325315005727, \"MacroF1\": 0.7922023317831084, \"Memory in Mb\": 0.9516172409057616, \"Time in s\": 28.155901 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7889009793253536, \"MicroF1\": 0.7889009793253536, \"MacroF1\": 0.7905862723276574, \"Memory in Mb\": 0.9520702362060548, \"Time in s\": 30.602138 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.78860103626943, \"MicroF1\": 0.78860103626943, \"MacroF1\": 0.7894031693051725, \"Memory in Mb\": 0.952082633972168, \"Time in s\": 33.138731 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7873392680514342, \"MicroF1\": 0.7873392680514342, \"MacroF1\": 0.7878835011583499, \"Memory in Mb\": 0.9515609741210938, \"Time in s\": 35.768379 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7899716177861873, \"MicroF1\": 0.7899716177861873, \"MacroF1\": 0.7897146415510686, \"Memory in Mb\": 0.9520339965820312, \"Time in s\": 38.488246 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7905711695376246, \"MicroF1\": 0.7905711695376246, \"MacroF1\": 0.7902707663283154, \"Memory in Mb\": 0.9521427154541016, \"Time in s\": 41.298010000000005 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7919930374238469, \"MicroF1\": 0.7919930374238469, \"MacroF1\": 0.7910217164829003, \"Memory in Mb\": 0.9516496658325196, \"Time in s\": 44.198117 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.793305439330544, \"MicroF1\": 0.793305439330544, \"MacroF1\": 0.7926565595792737, \"Memory in Mb\": 0.9516582489013672, \"Time in s\": 47.188338 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7921031426269137, \"MicroF1\": 0.7921031426269137, \"MacroF1\": 0.791644431462719, \"Memory in Mb\": 0.9522132873535156, \"Time in s\": 50.271512 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7964257964257965, \"MicroF1\": 0.7964257964257965, \"MacroF1\": 0.7949172523959339, \"Memory in Mb\": 0.952223777770996, \"Time in s\": 53.444669000000005 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.795198799699925, \"MicroF1\": 0.7951987996999249, \"MacroF1\": 0.7938516970082157, \"Memory in Mb\": 0.9516925811767578, \"Time in s\": 56.707980000000006 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7955039883973894, \"MicroF1\": 0.7955039883973894, \"MacroF1\": 0.794312731896104, \"Memory in Mb\": 0.9516897201538086, \"Time in s\": 60.06121 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7971929824561403, \"MicroF1\": 0.7971929824561403, \"MacroF1\": 0.7952130436298935, \"Memory in Mb\": 0.9521360397338868, \"Time in s\": 63.507281000000006 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8008157715839564, \"MicroF1\": 0.8008157715839563, \"MacroF1\": 0.7971305683653547, \"Memory in Mb\": 0.9521236419677734, \"Time in s\": 67.043451 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8015820698747528, \"MicroF1\": 0.8015820698747528, \"MacroF1\": 0.7969787037511136, \"Memory in Mb\": 0.95166015625, \"Time in s\": 70.670162 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8016634676903391, \"MicroF1\": 0.8016634676903392, \"MacroF1\": 0.7975983332578384, \"Memory in Mb\": 0.9521465301513672, 
\"Time in s\": 74.387065 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8017402113113735, \"MicroF1\": 0.8017402113113735, \"MacroF1\": 0.7969541458804642, \"Memory in Mb\": 0.9521703720092772, \"Time in s\": 78.19725000000001 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8018126888217523, \"MicroF1\": 0.8018126888217523, \"MacroF1\": 0.7970318311622571, \"Memory in Mb\": 0.9516267776489258, \"Time in s\": 82.09764400000002 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8018812463256908, \"MicroF1\": 0.8018812463256908, \"MacroF1\": 0.7992301124377234, \"Memory in Mb\": 0.9516735076904296, \"Time in s\": 86.09102400000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8036634230108758, \"MicroF1\": 0.8036634230108759, \"MacroF1\": 0.8004815801809151, \"Memory in Mb\": 0.952157974243164, \"Time in s\": 90.17551900000002 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8042387060791969, \"MicroF1\": 0.8042387060791969, \"MacroF1\": 0.799787639242423, \"Memory in Mb\": 0.9521493911743164, \"Time in s\": 94.35067000000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8053289831430125, \"MicroF1\": 0.8053289831430125, \"MacroF1\": 0.8009597766649573, \"Memory in Mb\": 0.9516563415527344, \"Time in s\": 98.61893200000004 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.803183023872679, \"MicroF1\": 0.8031830238726789, \"MacroF1\": 0.799227837217116, \"Memory in Mb\": 0.9521894454956056, \"Time in s\": 102.97762100000004 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8032107716209218, \"MicroF1\": 0.8032107716209218, \"MacroF1\": 0.7985344176802335, \"Memory in Mb\": 0.9521827697753906, \"Time in s\": 107.42655600000003 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8042488619119879, \"MicroF1\": 0.8042488619119877, \"MacroF1\": 0.7992002826592023, \"Memory in Mb\": 0.9516563415527344, \"Time in s\": 111.96600400000004 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8057340583292141, \"MicroF1\": 0.8057340583292142, \"MacroF1\": 0.799488243695578, \"Memory in Mb\": 0.9516725540161132, \"Time in s\": 116.59842900000002 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.80521991300145, \"MicroF1\": 0.80521991300145, \"MacroF1\": 0.7990099218703556, \"Memory in Mb\": 0.9521942138671876, \"Time in s\": 121.31866500000002 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8056737588652483, \"MicroF1\": 0.8056737588652483, \"MacroF1\": 0.798658845250099, \"Memory in Mb\": 0.9521694183349608, \"Time in s\": 126.12612700000004 }, { \"step\": 2162, \"track\": \"Multiclass classification\", 
\"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8061082832022212, \"MicroF1\": 0.8061082832022212, \"MacroF1\": 0.7986518526284686, \"Memory in Mb\": 0.9516706466674804, \"Time in s\": 131.02082400000003 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8078840054372451, \"MicroF1\": 0.8078840054372451, \"MacroF1\": 0.7995103660963299, \"Memory in Mb\": 0.9521732330322266, \"Time in s\": 136.00538900000004 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8047048379937861, \"MicroF1\": 0.8047048379937861, \"MacroF1\": 0.7963417515999387, \"Memory in Mb\": 0.9521608352661132, \"Time in s\": 141.07727400000005 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8033927794693345, \"MicroF1\": 0.8033927794693345, \"MacroF1\": 0.7949752803158223, \"Memory in Mb\": 0.9516582489013672, \"Time in s\": 146.23634000000004 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6293838862559241, \"MicroF1\": 0.6293838862559241, \"MacroF1\": 0.5939725193500994, \"Memory in Mb\": 1.5110340118408203, \"Time in s\": 3.200552 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5894737350922559, \"Memory in Mb\": 1.5110177993774414, \"Time in s\": 9.230037 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6198294916324597, \"MicroF1\": 0.6198294916324597, \"MacroF1\": 0.5838888884930272, \"Memory in Mb\": 1.5110721588134766, \"Time in s\": 17.854201 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6192280369405636, \"MicroF1\": 0.6192280369405636, \"MacroF1\": 0.5835519631382228, \"Memory in Mb\": 1.5110435485839844, \"Time in s\": 28.950406 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6256866830839174, \"MicroF1\": 0.6256866830839174, \"MacroF1\": 0.5887468172490868, \"Memory in Mb\": 1.511063575744629, \"Time in s\": 42.489771000000005 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6187845303867403, \"MicroF1\": 0.6187845303867403, \"MacroF1\": 0.5833486573822239, \"Memory in Mb\": 1.5110454559326172, \"Time in s\": 58.469284 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6180489784873495, \"MicroF1\": 0.6180489784873495, \"MacroF1\": 0.5826198728106428, \"Memory in Mb\": 1.5110721588134766, \"Time in s\": 76.881409 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.619746655617379, \"MicroF1\": 0.619746655617379, \"MacroF1\": 0.5840081546383048, \"Memory in Mb\": 1.5110502243041992, \"Time in s\": 97.728436 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6190676628433126, \"MicroF1\": 0.6190676628433126, \"MacroF1\": 0.5828637425505069, \"Memory in Mb\": 1.511042594909668, \"Time 
in s\": 121.006042 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6198503646178616, \"MicroF1\": 0.6198503646178616, \"MacroF1\": 0.5836946750940745, \"Memory in Mb\": 1.5110759735107422, \"Time in s\": 146.711441 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6175634954799828, \"MicroF1\": 0.6175634954799828, \"MacroF1\": 0.5822534545682404, \"Memory in Mb\": 1.511033058166504, \"Time in s\": 174.85184600000002 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6204719438086971, \"MicroF1\": 0.6204719438086971, \"MacroF1\": 0.5879866433279776, \"Memory in Mb\": 1.5111761093139648, \"Time in s\": 205.425851 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6369199388067313, \"MicroF1\": 0.6369199388067313, \"MacroF1\": 0.618745437324273, \"Memory in Mb\": 1.5112380981445312, \"Time in s\": 238.425571 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.630386254481499, \"MicroF1\": 0.630386254481499, \"MacroF1\": 0.6115259179282228, \"Memory in Mb\": 1.5110998153686523, \"Time in s\": 273.85181700000004 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5992171222930741, \"MicroF1\": 0.5992171222930741, \"MacroF1\": 0.581747071745844, \"Memory in Mb\": 1.5110797882080078, \"Time in s\": 311.71395800000005 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5783959751405742, \"MicroF1\": 0.5783959751405742, \"MacroF1\": 0.5619501594422388, \"Memory in Mb\": 1.511063575744629, \"Time in s\": 352.00364300000007 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5631998217369506, \"MicroF1\": 0.5631998217369506, \"MacroF1\": 0.5464708450044057, \"Memory in Mb\": 1.511117935180664, \"Time in s\": 394.7217830000001 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.565528489503867, \"MicroF1\": 0.565528489503867, \"MacroF1\": 0.5447789723081985, \"Memory in Mb\": 1.5110950469970703, \"Time in s\": 439.87157300000007 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5725464785924338, \"MicroF1\": 0.5725464785924338, \"MacroF1\": 0.5493312346450109, \"Memory in Mb\": 2.16702938079834, \"Time in s\": 487.436087 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5819404327856432, \"MicroF1\": 0.5819404327856432, \"MacroF1\": 0.5575973426297249, \"Memory in Mb\": 2.167789459228516, \"Time in s\": 537.344767 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5905298759864712, \"MicroF1\": 0.5905298759864712, \"MacroF1\": 0.5648531785235197, \"Memory in Mb\": 2.167774200439453, \"Time in s\": 589.565521 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995867590719297, \"MicroF1\": 
0.5995867590719297, \"MacroF1\": 0.5728007753824246, \"Memory in Mb\": 2.167778968811035, \"Time in s\": 644.107989 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6068678717009099, \"MicroF1\": 0.6068678717009099, \"MacroF1\": 0.578555560305262, \"Memory in Mb\": 2.16780948638916, \"Time in s\": 700.966552 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6143313735548278, \"MicroF1\": 0.6143313735548278, \"MacroF1\": 0.5848116898462843, \"Memory in Mb\": 2.167755126953125, \"Time in s\": 760.153761 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.621084131974696, \"MicroF1\": 0.621084131974696, \"MacroF1\": 0.5900605973096019, \"Memory in Mb\": 2.1677980422973637, \"Time in s\": 821.662998 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6266618102349298, \"MicroF1\": 0.6266618102349298, \"MacroF1\": 0.5936647802901621, \"Memory in Mb\": 2.167790412902832, \"Time in s\": 885.506266 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6295114166462067, \"MicroF1\": 0.6295114166462067, \"MacroF1\": 0.5991480792709615, \"Memory in Mb\": 2.168045997619629, \"Time in s\": 951.65113 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6294517536442655, \"MicroF1\": 0.6294517536442655, \"MacroF1\": 0.6037001563215106, \"Memory in Mb\": 2.1680641174316406, \"Time in s\": 1020.249901 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6287104463964993, \"MicroF1\": 0.6287104463964993, \"MacroF1\": 0.6068237930795873, \"Memory in Mb\": 2.168071746826172, \"Time in s\": 1091.329507 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6292496606584804, \"MicroF1\": 0.6292496606584804, \"MacroF1\": 0.6106666463743293, \"Memory in Mb\": 2.1680679321289062, \"Time in s\": 1164.886578 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6302734076676341, \"MicroF1\": 0.6302734076676341, \"MacroF1\": 0.614251388937007, \"Memory in Mb\": 2.168027877807617, \"Time in s\": 1240.9182979999998 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6266165547039152, \"MicroF1\": 0.6266165547039152, \"MacroF1\": 0.6112639299818544, \"Memory in Mb\": 2.1678638458251958, \"Time in s\": 1319.4287269999998 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6216604011823113, \"MicroF1\": 0.6216604011823113, \"MacroF1\": 0.6060150865308916, \"Memory in Mb\": 2.1678390502929688, \"Time in s\": 1400.4187529999997 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6181377600757597, \"MicroF1\": 0.6181377600757597, \"MacroF1\": 0.6018714875673907, \"Memory in Mb\": 2.167888641357422, \"Time in s\": 1483.8935999999997 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Voting\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6138153088557591, \"MicroF1\": 0.6138153088557591, \"MacroF1\": 0.5971057932031453, \"Memory in Mb\": 2.167864799499512, \"Time in s\": 1569.8367669999996 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6116796001578324, \"MicroF1\": 0.6116796001578324, \"MacroF1\": 0.5945381289951768, \"Memory in Mb\": 2.709075927734375, \"Time in s\": 1658.2515679999997 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6122187811932124, \"MicroF1\": 0.6122187811932124, \"MacroF1\": 0.5950787740952911, \"Memory in Mb\": 2.823759078979492, \"Time in s\": 1749.1539719999994 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6125052956861963, \"MicroF1\": 0.6125052956861963, \"MacroF1\": 0.5964110573184415, \"Memory in Mb\": 2.823785781860352, \"Time in s\": 1842.4635979999996 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6145254109705461, \"MicroF1\": 0.6145254109705461, \"MacroF1\": 0.5992770713855892, \"Memory in Mb\": 2.82379150390625, \"Time in s\": 1938.0560169999997 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6163024692819432, \"MicroF1\": 0.6163024692819432, \"MacroF1\": 0.601670854132613, \"Memory in Mb\": 2.823772430419922, \"Time in s\": 2035.9261369999997 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6181776186626631, \"MicroF1\": 0.6181776186626631, \"MacroF1\": 0.6041281005310094, \"Memory in Mb\": 2.8237924575805664, \"Time in s\": 2136.072454 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6197605465491195, \"MicroF1\": 0.6197605465491195, \"MacroF1\": 0.6062005996937425, \"Memory in Mb\": 2.824528694152832, \"Time in s\": 2238.51149 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6214019864778558, \"MicroF1\": 0.6214019864778558, \"MacroF1\": 0.607792464273323, \"Memory in Mb\": 2.824528694152832, \"Time in s\": 2343.241606 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6233992639304393, \"MicroF1\": 0.6233992639304393, \"MacroF1\": 0.6097993182820672, \"Memory in Mb\": 2.824531555175781, \"Time in s\": 2450.26566 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6260864075422463, \"MicroF1\": 0.6260864075422463, \"MacroF1\": 0.6129939002712749, \"Memory in Mb\": 2.8244552612304688, \"Time in s\": 2559.63137 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6300154400411735, \"MicroF1\": 0.6300154400411735, \"MacroF1\": 0.6173873766747581, \"Memory in Mb\": 2.824479103088379, \"Time in s\": 2671.379304 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6343011424311418, \"MicroF1\": 0.6343011424311418, \"MacroF1\": 0.621931196280001, \"Memory in Mb\": 2.8244781494140625, \"Time in s\": 2785.498386 }, 
{ \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.638506914988064, \"MicroF1\": 0.638506914988064, \"MacroF1\": 0.6263145143911814, \"Memory in Mb\": 2.947113037109375, \"Time in s\": 2902.0058780000004 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6434686817540537, \"MicroF1\": 0.6434686817540537, \"MacroF1\": 0.6313977027921706, \"Memory in Mb\": 3.184900283813477, \"Time in s\": 3020.8771950000005 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6479289380480691, \"MicroF1\": 0.6479289380480691, \"MacroF1\": 0.635943324049664, \"Memory in Mb\": 3.3886165618896484, \"Time in s\": 3141.9869480000007 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 0.6423864364624023, \"Time in s\": 0.703596 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546012269938652, \"MicroF1\": 0.9546012269938652, \"MacroF1\": 0.7993954329623859, \"Memory in Mb\": 0.8351936340332031, \"Time in s\": 2.165771 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9206868356500408, \"MicroF1\": 0.9206868356500408, \"MacroF1\": 0.9055597826779512, \"Memory in Mb\": 1.029007911682129, \"Time in s\": 4.467328 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9307173513182097, \"MicroF1\": 0.9307173513182097, \"MacroF1\": 0.917259757091744, \"Memory in Mb\": 1.2232952117919922, \"Time in s\": 7.6892 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9303580186365864, \"MicroF1\": 0.9303580186365864, \"MacroF1\": 0.919916287137026, \"Memory in Mb\": 1.428065299987793, \"Time in s\": 11.917356 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9060073559460564, \"MicroF1\": 0.9060073559460564, \"MacroF1\": 0.9093956340782632, \"Memory in Mb\": 1.6218795776367188, \"Time in s\": 17.211204 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9103327495621716, \"MicroF1\": 0.9103327495621716, \"MacroF1\": 0.8980697688452707, \"Memory in Mb\": 1.8146867752075195, \"Time in s\": 23.617224 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.904382470119522, \"MicroF1\": 0.904382470119522, \"MacroF1\": 0.888202704220525, \"Memory in Mb\": 2.0085010528564453, \"Time in s\": 31.150342 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8994824298556252, \"MicroF1\": 0.8994824298556252, \"MacroF1\": 0.8972334256598172, \"Memory in Mb\": 2.2018117904663086, \"Time in s\": 39.868796 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8945820053934788, \"MicroF1\": 0.8945820053934787, \"MacroF1\": 0.8851783489415491, \"Memory in 
Mb\": 2.420787811279297, \"Time in s\": 49.793690000000005 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8914642299977713, \"MicroF1\": 0.8914642299977713, \"MacroF1\": 0.898372373723482, \"Memory in Mb\": 2.6146020889282227, \"Time in s\": 61.011475 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8880490296220633, \"MicroF1\": 0.8880490296220633, \"MacroF1\": 0.8932697641963906, \"Memory in Mb\": 2.807912826538086, \"Time in s\": 73.572163 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.883085046200264, \"MicroF1\": 0.883085046200264, \"MacroF1\": 0.8680917053752625, \"Memory in Mb\": 3.000770568847656, \"Time in s\": 87.52603 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8746279110488531, \"MicroF1\": 0.8746279110488531, \"MacroF1\": 0.8792177397015432, \"Memory in Mb\": 3.194584846496582, \"Time in s\": 102.922365 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8695865337473443, \"MicroF1\": 0.8695865337473442, \"MacroF1\": 0.8546904737358852, \"Memory in Mb\": 3.387392044067383, \"Time in s\": 119.819987 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8579745671824728, \"MicroF1\": 0.8579745671824728, \"MacroF1\": 0.858067415232278, \"Memory in Mb\": 3.5812063217163086, \"Time in s\": 138.28125400000002 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8537851478010093, \"MicroF1\": 0.8537851478010093, \"MacroF1\": 0.8590096923865055, \"Memory in Mb\": 3.774517059326172, \"Time in s\": 158.37518500000002 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8594579871986926, \"MicroF1\": 0.8594579871986926, \"MacroF1\": 0.8620220702364139, \"Memory in Mb\": 3.9702539443969727, \"Time in s\": 180.164436 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8593729841310799, \"MicroF1\": 0.8593729841310799, \"MacroF1\": 0.8617576440335053, \"Memory in Mb\": 4.164068222045898, \"Time in s\": 203.71390800000003 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8601544306900355, \"MicroF1\": 0.8601544306900355, \"MacroF1\": 0.8605355806611993, \"Memory in Mb\": 4.357378959655762, \"Time in s\": 229.09093700000005 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8596941753239173, \"MicroF1\": 0.8596941753239173, \"MacroF1\": 0.8627767842417701, \"Memory in Mb\": 4.60028076171875, \"Time in s\": 256.36638600000003 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8599442896935933, \"MicroF1\": 0.8599442896935933, \"MacroF1\": 0.8629838037923419, \"Memory in Mb\": 4.794095039367676, \"Time in s\": 285.60935500000005 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.8581477139507621, \"MicroF1\": 0.8581477139507621, \"MacroF1\": 0.8592031021693959, \"Memory in Mb\": 4.986902236938477, \"Time in s\": 316.88668100000007 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8539475028087019, \"MicroF1\": 0.8539475028087019, \"MacroF1\": 0.8546213426549989, \"Memory in Mb\": 5.180716514587402, \"Time in s\": 350.2631590000001 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8465535836846749, \"MicroF1\": 0.8465535836846749, \"MacroF1\": 0.8431270001478435, \"Memory in Mb\": 5.374000549316406, \"Time in s\": 385.8123490000001 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8300179126991609, \"MicroF1\": 0.8300179126991609, \"MacroF1\": 0.8240754775818138, \"Memory in Mb\": 5.566834449768066, \"Time in s\": 423.5940000000001 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8254198819791194, \"MicroF1\": 0.8254198819791194, \"MacroF1\": 0.8271925616445298, \"Memory in Mb\": 5.760648727416992, \"Time in s\": 463.6719130000001 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.820449969360063, \"MicroF1\": 0.820449969360063, \"MacroF1\": 0.8166393841205931, \"Memory in Mb\": 5.953959465026856, \"Time in s\": 506.1140620000001 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8169216465218494, \"MicroF1\": 0.8169216465218494, \"MacroF1\": 0.8172029683603622, \"Memory in Mb\": 6.146766662597656, \"Time in s\": 550.9879090000001 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8200016341204347, \"MicroF1\": 0.8200016341204347, \"MacroF1\": 0.8225884010623591, \"Memory in Mb\": 6.340580940246582, \"Time in s\": 598.362302 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8167154265833795, \"MicroF1\": 0.8167154265833795, \"MacroF1\": 0.8162987105601626, \"Memory in Mb\": 6.533388137817383, \"Time in s\": 648.30112 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8121792416698583, \"MicroF1\": 0.8121792416698584, \"MacroF1\": 0.8136075732214813, \"Memory in Mb\": 6.727202415466309, \"Time in s\": 700.874955 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8099234940206492, \"MicroF1\": 0.8099234940206492, \"MacroF1\": 0.8122480630127521, \"Memory in Mb\": 6.920539855957031, \"Time in s\": 756.151971 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.810539975488429, \"MicroF1\": 0.810539975488429, \"MacroF1\": 0.8134726777385565, \"Memory in Mb\": 7.113347053527832, \"Time in s\": 814.194174 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8103508649065061, \"MicroF1\": 0.810350864906506, \"MacroF1\": 0.8130549704062812, \"Memory in Mb\": 7.307161331176758, \"Time in s\": 875.070018 }, { \"step\": 14688, 
\"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8133042826989855, \"MicroF1\": 0.8133042826989855, \"MacroF1\": 0.8168484225511677, \"Memory in Mb\": 7.500472068786621, \"Time in s\": 938.856287 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8174229877442862, \"MicroF1\": 0.8174229877442862, \"MacroF1\": 0.8208616131428813, \"Memory in Mb\": 7.693279266357422, \"Time in s\": 1005.608239 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8175191898342257, \"MicroF1\": 0.8175191898342257, \"MacroF1\": 0.8200404227627133, \"Memory in Mb\": 7.887093544006348, \"Time in s\": 1075.3914499999998 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8100685060649865, \"MicroF1\": 0.8100685060649865, \"MacroF1\": 0.8105704783549956, \"Memory in Mb\": 8.079900741577148, \"Time in s\": 1148.2720969999998 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8058704577486365, \"MicroF1\": 0.8058704577486365, \"MacroF1\": 0.8082920647955453, \"Memory in Mb\": 8.273715019226074, \"Time in s\": 1224.3182599999998 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8029533090213428, \"MicroF1\": 0.8029533090213428, \"MacroF1\": 0.8061756731743527, \"Memory in Mb\": 8.467025756835938, \"Time in s\": 1303.603477 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7992996790195507, \"MicroF1\": 0.7992996790195507, \"MacroF1\": 0.8021910628966759, \"Memory in Mb\": 8.7622652053833, \"Time in s\": 1386.189622 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7934218776720059, \"MicroF1\": 0.7934218776720059, \"MacroF1\": 0.7969041071406875, \"Memory in Mb\": 8.956079483032227, \"Time in s\": 1472.1458429999998 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7934933986964514, \"MicroF1\": 0.7934933986964514, \"MacroF1\": 0.7978100866424277, \"Memory in Mb\": 9.14939022064209, \"Time in s\": 1561.5342959999998 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7969933002886868, \"MicroF1\": 0.7969933002886866, \"MacroF1\": 0.8014382450066739, \"Memory in Mb\": 9.34219741821289, \"Time in s\": 1654.4248979999998 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7999147439654714, \"MicroF1\": 0.7999147439654714, \"MacroF1\": 0.8043799341405246, \"Memory in Mb\": 9.536011695861816, \"Time in s\": 1750.8812689999995 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7945241199478488, \"MicroF1\": 0.7945241199478488, \"MacroF1\": 0.7987282715896407, \"Memory in Mb\": 9.728818893432615, \"Time in s\": 1850.973451 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.797375274472757, \"MicroF1\": 0.797375274472757, 
\"MacroF1\": 0.8021140041360401, \"Memory in Mb\": 9.922633171081545, \"Time in s\": 1954.769568 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7945075283877745, \"MicroF1\": 0.7945075283877745, \"MacroF1\": 0.7995475233856788, \"Memory in Mb\": 10.115943908691406, \"Time in s\": 2062.333925 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.793274180106868, \"MicroF1\": 0.793274180106868, \"MacroF1\": 0.7984237858213096, \"Memory in Mb\": 10.308751106262209, \"Time in s\": 2173.749777 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1777777777777777, \"MicroF1\": 0.1777777777777777, \"MacroF1\": 0.1526026604973973, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.007048 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1318681318681318, \"MicroF1\": 0.1318681318681318, \"MacroF1\": 0.1213108980966124, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.018168 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1240875912408759, \"MicroF1\": 0.1240875912408759, \"MacroF1\": 0.1187445506554449, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.031716 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1256830601092896, \"MicroF1\": 0.1256830601092896, \"MacroF1\": 0.1226298342307158, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.047654 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1266375545851528, \"MicroF1\": 0.1266375545851528, \"MacroF1\": 0.1250385204120806, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.065983 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1272727272727272, \"MicroF1\": 0.1272727272727272, \"MacroF1\": 0.1242790791814499, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.086242 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1339563862928348, \"MicroF1\": 0.1339563862928348, \"MacroF1\": 0.1321003659624602, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.108232 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1389645776566757, \"MicroF1\": 0.1389645776566757, \"MacroF1\": 0.1374501146297296, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.131958 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1404358353510895, \"MicroF1\": 0.1404358353510895, \"MacroF1\": 0.1403581309694754, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.1574209999999999 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1459694989106753, \"MicroF1\": 0.1459694989106753, \"MacroF1\": 0.1456314871072794, \"Memory in 
Mb\": 0.0013656616210937, \"Time in s\": 0.1845859999999999 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1386138613861386, \"MicroF1\": 0.1386138613861386, \"MacroF1\": 0.1383381610231494, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.2134849999999999 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1397459165154265, \"MicroF1\": 0.1397459165154265, \"MacroF1\": 0.1393865249177789, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.24411 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1373534338358459, \"MicroF1\": 0.1373534338358459, \"MacroF1\": 0.1372798104345861, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.276463 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1399688958009331, \"MicroF1\": 0.1399688958009331, \"MacroF1\": 0.1401757170901796, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.310533 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1378809869375907, \"MicroF1\": 0.1378809869375907, \"MacroF1\": 0.1380151778455332, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.346313 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1401360544217687, \"MicroF1\": 0.1401360544217687, \"MacroF1\": 0.1403108892795828, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.38382 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1421254801536491, \"MicroF1\": 0.1421254801536491, \"MacroF1\": 0.1420930265541123, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.423095 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1426844014510278, \"MicroF1\": 0.1426844014510278, \"MacroF1\": 0.1422987455304691, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.464082 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.138602520045819, \"MicroF1\": 0.138602520045819, \"MacroF1\": 0.1384535269459527, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.506788 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1349292709466811, \"MicroF1\": 0.1349292709466811, \"MacroF1\": 0.1348083913046733, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.551195 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1336787564766839, \"MicroF1\": 0.1336787564766839, \"MacroF1\": 0.1334917777444527, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.597302 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1325420375865479, \"MicroF1\": 0.1325420375865479, \"MacroF1\": 0.1324936677659038, \"Memory in Mb\": 
0.0013675689697265, \"Time in s\": 0.645131 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1333964049195837, \"MicroF1\": 0.1333964049195837, \"MacroF1\": 0.1331834965440007, \"Memory in Mb\": 0.0013656616210937, \"Time in s\": 0.69466 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1341795104261106, \"MicroF1\": 0.1341795104261106, \"MacroF1\": 0.1340282652950153, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.7459020000000001 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.134029590948651, \"MicroF1\": 0.134029590948651, \"MacroF1\": 0.1340639115051912, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.7988440000000001 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1364016736401673, \"MicroF1\": 0.1364016736401673, \"MacroF1\": 0.1363948420172951, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.8534870000000001 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1394037066881547, \"MicroF1\": 0.1394037066881547, \"MacroF1\": 0.1391977238389222, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.909824 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1414141414141414, \"MicroF1\": 0.1414141414141414, \"MacroF1\": 0.1411871502321015, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.967868 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1432858214553638, \"MicroF1\": 0.1432858214553638, \"MacroF1\": 0.1430255327815666, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 1.027625 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1435823060188542, \"MicroF1\": 0.1435823060188542, \"MacroF1\": 0.1433209000486506, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.089079 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1417543859649122, \"MicroF1\": 0.1417543859649122, \"MacroF1\": 0.1414546655929112, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.152253 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1393609789259007, \"MicroF1\": 0.1393609789259007, \"MacroF1\": 0.1390762971394262, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 1.217139 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1397495056031641, \"MicroF1\": 0.1397495056031641, \"MacroF1\": 0.1395136668589845, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.283725 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1369161868202175, \"MicroF1\": 0.1369161868202175, \"MacroF1\": 0.1366417047439511, 
\"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.352073 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1361093847110006, \"MicroF1\": 0.1361093847110006, \"MacroF1\": 0.1359768388190307, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 1.422125 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1365558912386707, \"MicroF1\": 0.1365558912386707, \"MacroF1\": 0.1363322462377459, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.493896 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1393298059964726, \"MicroF1\": 0.1393298059964726, \"MacroF1\": 0.1390129627439909, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 1.5673830000000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1419576416714367, \"MicroF1\": 0.1419576416714367, \"MacroF1\": 0.1414719731272364, \"Memory in Mb\": 0.0013656616210937, \"Time in s\": 1.6425530000000002 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1422197434467373, \"MicroF1\": 0.1422197434467373, \"MacroF1\": 0.1419410396611007, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 1.7194410000000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1413811854268624, \"MicroF1\": 0.1413811854268624, \"MacroF1\": 0.1411432976659866, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 1.7980130000000003 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.140053050397878, \"MicroF1\": 0.140053050397878, \"MacroF1\": 0.1397325871382075, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.8782870000000005 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1429311237700673, \"MicroF1\": 0.1429311237700673, \"MacroF1\": 0.1427522922982585, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.960245 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1461810824481537, \"MicroF1\": 0.1461810824481537, \"MacroF1\": 0.1459715815160596, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.043928 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1443400889767671, \"MicroF1\": 0.1443400889767671, \"MacroF1\": 0.1441662523776106, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.12929 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1440309328177863, \"MicroF1\": 0.1440309328177863, \"MacroF1\": 0.1438554349712762, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 2.216361 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1446808510638297, \"MicroF1\": 0.1446808510638297, 
\"MacroF1\": 0.1446036231777657, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 2.305147 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1453031004164738, \"MicroF1\": 0.1453031004164738, \"MacroF1\": 0.1452046591382179, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.395629 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1449932034435885, \"MicroF1\": 0.1449932034435885, \"MacroF1\": 0.1449110985199169, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.487817 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1464713715046604, \"MicroF1\": 0.1464713715046604, \"MacroF1\": 0.146404255341296, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 2.5817110000000003 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1478903871248368, \"MicroF1\": 0.1478903871248368, \"MacroF1\": 0.1478868852481029, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 2.6773210000000005 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1582938388625592, \"MicroF1\": 0.1582938388625592, \"MacroF1\": 0.1376212379233521, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.055672 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1657981999052581, \"MicroF1\": 0.1657981999052581, \"MacroF1\": 0.1511045106411843, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.157206 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1701926113040732, \"MicroF1\": 0.1701926113040732, \"MacroF1\": 0.1568151235503963, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 0.304619 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1659957376272791, \"MicroF1\": 0.1659957376272791, \"MacroF1\": 0.1525443315605067, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.4978269999999999 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1708656942602765, \"MicroF1\": 0.1708656942602765, \"MacroF1\": 0.1567667911399359, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 0.736912 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1737963693764798, \"MicroF1\": 0.1737963693764798, \"MacroF1\": 0.1613756819597299, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 1.021646 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1752130970098769, \"MicroF1\": 0.1752130970098769, \"MacroF1\": 0.1618940790413477, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 1.351897 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1772226826092103, \"MicroF1\": 0.1772226826092103, \"MacroF1\": 0.163740045170864, 
\"Memory in Mb\": 0.0013818740844726, \"Time in s\": 1.7276090000000002 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1773124276544249, \"MicroF1\": 0.1773124276544249, \"MacroF1\": 0.1637492974453095, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 2.148941 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1790889288758405, \"MicroF1\": 0.1790889288758405, \"MacroF1\": 0.1656421076747495, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 2.615755 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1789926818768833, \"MicroF1\": 0.1789926818768833, \"MacroF1\": 0.1655925383533761, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 3.128148 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1853050272275274, \"MicroF1\": 0.1853050272275274, \"MacroF1\": 0.182698099884098, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 3.685883 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2479784366576819, \"MicroF1\": 0.2479784366576819, \"MacroF1\": 0.266039368455288, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 4.288806 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2795778935263478, \"MicroF1\": 0.2795778935263478, \"MacroF1\": 0.2822974275171512, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 4.937051 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2761537975882315, \"MicroF1\": 0.2761537975882315, \"MacroF1\": 0.2847375853365436, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 5.631085000000001 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723290914471737, \"MicroF1\": 0.2723290914471737, \"MacroF1\": 0.2859139704285301, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 6.370871000000001 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2720739791655061, \"MicroF1\": 0.2720739791655061, \"MacroF1\": 0.2880143206503877, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 7.156724000000001 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2825274898721523, \"MicroF1\": 0.2825274898721523, \"MacroF1\": 0.2877504429321086, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 7.988744000000001 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2872451776902756, \"MicroF1\": 0.2872451776902756, \"MacroF1\": 0.2866739236661926, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 8.866412000000002 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2830626450116009, \"MicroF1\": 0.2830626450116009, \"MacroF1\": 0.2816476602425525, \"Memory in Mb\": 
0.0013837814331054, \"Time in s\": 9.789517000000002 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2805411499436302, \"MicroF1\": 0.2805411499436302, \"MacroF1\": 0.2786296072528009, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 10.758806000000002 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2797124531875511, \"MicroF1\": 0.2797124531875511, \"MacroF1\": 0.2771941975793341, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 11.774485000000002 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2777205912628155, \"MicroF1\": 0.2777205912628155, \"MacroF1\": 0.2745878480946635, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 12.836246000000004 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2756579726157124, \"MicroF1\": 0.2756579726157124, \"MacroF1\": 0.2723380305202896, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 13.944171000000004 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2739497708246524, \"MicroF1\": 0.2739497708246524, \"MacroF1\": 0.2699690442569991, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 15.098386000000003 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2718994718630486, \"MicroF1\": 0.2718994718630486, \"MacroF1\": 0.2671948532388624, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 16.299309000000004 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723860965942969, \"MicroF1\": 0.2723860965942969, \"MacroF1\": 0.2686965366571338, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 17.546694000000006 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2738187844556431, \"MicroF1\": 0.2738187844556431, \"MacroF1\": 0.2720266804437783, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 18.840271000000005 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2753812493877151, \"MicroF1\": 0.2753812493877151, \"MacroF1\": 0.2748698663810352, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 20.179938000000003 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2780390795163989, \"MicroF1\": 0.2780390795163989, \"MacroF1\": 0.2784141751235631, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 21.565237000000003 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.279670077898274, \"MicroF1\": 0.279670077898274, \"MacroF1\": 0.2802192251245276, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 22.996618000000005 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2808440117190968, \"MicroF1\": 0.2808440117190968, 
\"MacroF1\": 0.2811962745371706, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 24.474295000000005 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2772405085086234, \"MicroF1\": 0.2772405085086234, \"MacroF1\": 0.2781905182864757, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 25.998543000000005 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2739325404562293, \"MicroF1\": 0.2739325404562293, \"MacroF1\": 0.2754200456137155, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 27.569042000000007 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.271246516410076, \"MicroF1\": 0.271246516410076, \"MacroF1\": 0.273332837678202, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 29.18542500000001 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2685518874128633, \"MicroF1\": 0.2685518874128633, \"MacroF1\": 0.2710722002891223, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 30.847968000000005 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.277034117376059, \"MicroF1\": 0.277034117376059, \"MacroF1\": 0.2770619820799866, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 32.556678000000005 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2761731502479627, \"MicroF1\": 0.2761731502479627, \"MacroF1\": 0.2760769006623073, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 34.31145000000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2756720005827647, \"MicroF1\": 0.2756720005827647, \"MacroF1\": 0.2754352632972117, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 36.11334600000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2740121688486943, \"MicroF1\": 0.2740121688486943, \"MacroF1\": 0.2735946193588543, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 37.96222800000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2738422450629403, \"MicroF1\": 0.2738422450629403, \"MacroF1\": 0.2731948869083578, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 39.85759000000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2729588960790061, \"MicroF1\": 0.2729588960790061, \"MacroF1\": 0.2720911653869048, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 41.79925400000001 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2720505648908758, \"MicroF1\": 0.2720505648908758, \"MacroF1\": 0.2708084959373003, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 43.79182700000001 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.271377224888621, 
\"MicroF1\": 0.271377224888621, \"MacroF1\": 0.2698631410415436, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 45.834688000000014 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723542162082535, \"MicroF1\": 0.2723542162082535, \"MacroF1\": 0.2717062798322285, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 47.92837500000002 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2741327843540916, \"MicroF1\": 0.2741327843540916, \"MacroF1\": 0.2744946340974243, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 50.07231500000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2753520984868328, \"MicroF1\": 0.2753520984868328, \"MacroF1\": 0.2765036876430403, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 52.26665400000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2768362696549411, \"MicroF1\": 0.2768362696549411, \"MacroF1\": 0.2786344091273496, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 54.51115200000002 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2782791875229499, \"MicroF1\": 0.2782791875229499, \"MacroF1\": 0.2805971515128955, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 56.805577000000014 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2891153241538665, \"MicroF1\": 0.2891153241538665, \"MacroF1\": 0.2892953202729756, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 59.150289000000015 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975429975429976, \"MicroF1\": 0.9975429975429976, \"MacroF1\": 0.966040884438882, \"Memory in Mb\": 0.0006122589111328, \"Time in s\": 0.026957 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975460122699388, \"MicroF1\": 0.9975460122699388, \"MacroF1\": 0.9879967903427672, \"Memory in Mb\": 0.0006628036499023, \"Time in s\": 0.073338 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975470155355682, \"MicroF1\": 0.9975470155355682, \"MacroF1\": 0.9931179599499376, \"Memory in Mb\": 0.0007133483886718, \"Time in s\": 0.138405 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975475168608215, \"MicroF1\": 0.9975475168608215, \"MacroF1\": 0.9950750839342832, \"Memory in Mb\": 0.0012521743774414, \"Time in s\": 0.2220349999999999 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975478175576264, \"MicroF1\": 0.9975478175576264, \"MacroF1\": 0.9960150346160548, \"Memory in Mb\": 0.0013027191162109, \"Time in s\": 0.3242069999999999 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.9975480179812016, \"MicroF1\": 0.9975480179812016, \"MacroF1\": 0.9965317313935652, \"Memory in Mb\": 0.0013532638549804, \"Time in s\": 0.4452569999999999 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975481611208408, \"MicroF1\": 0.9975481611208408, \"MacroF1\": 0.996842428316928, \"Memory in Mb\": 0.00140380859375, \"Time in s\": 0.58488 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975482684646032, \"MicroF1\": 0.9975482684646032, \"MacroF1\": 0.9970416021996, \"Memory in Mb\": 0.0014543533325195, \"Time in s\": 0.7430509999999999 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975483519476982, \"MicroF1\": 0.9975483519476982, \"MacroF1\": 0.9971755428551424, \"Memory in Mb\": 0.001504898071289, \"Time in s\": 0.9196609999999998 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975484187300808, \"MicroF1\": 0.9975484187300808, \"MacroF1\": 0.9972690115789392, \"Memory in Mb\": 0.0015554428100585, \"Time in s\": 1.1148029999999998 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975484733675062, \"MicroF1\": 0.9975484733675062, \"MacroF1\": 0.9973361791525124, \"Memory in Mb\": 0.0016059875488281, \"Time in s\": 1.3284669999999998 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975485188968336, \"MicroF1\": 0.9975485188968336, \"MacroF1\": 0.9973856025730918, \"Memory in Mb\": 0.0016565322875976, \"Time in s\": 1.56082 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548557420328, \"MicroF1\": 0.997548557420328, \"MacroF1\": 0.997422679833574, \"Memory in Mb\": 0.0017070770263671, \"Time in s\": 1.81175 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975485904395028, \"MicroF1\": 0.9975485904395028, \"MacroF1\": 0.99745094204078, \"Memory in Mb\": 0.0017576217651367, \"Time in s\": 2.0815 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975486190554012, \"MicroF1\": 0.9975486190554012, \"MacroF1\": 0.9974727709453766, \"Memory in Mb\": 0.0018081665039062, \"Time in s\": 2.369724 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975486440937644, \"MicroF1\": 0.9975486440937644, \"MacroF1\": 0.997489815700999, \"Memory in Mb\": 0.0018587112426757, \"Time in s\": 2.6764910000000004 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548666186013, \"MicroF1\": 0.997548666186013, \"MacroF1\": 0.9975032443691146, \"Memory in Mb\": 0.0019092559814453, \"Time in s\": 3.0019130000000005 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.997548685823233, \"MicroF1\": 0.997548685823233, \"MacroF1\": 0.9975139007887864, \"Memory in Mb\": 0.0034246444702148, \"Time in s\": 3.3461420000000004 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487033931104, \"MicroF1\": 0.9975487033931104, \"MacroF1\": 0.9975224052755712, \"Memory in Mb\": 0.0034751892089843, \"Time in s\": 3.708917000000001 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548719205785, \"MicroF1\": 0.997548719205785, \"MacroF1\": 0.9975292209193422, \"Memory in Mb\": 0.0035257339477539, \"Time in s\": 4.090185000000001 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487335123148, \"MicroF1\": 0.9975487335123148, \"MacroF1\": 0.9975346982235256, \"Memory in Mb\": 0.0035762786865234, \"Time in s\": 4.489997000000001 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548746518106, \"MicroF1\": 0.997548746518106, \"MacroF1\": 0.9975391057693664, \"Memory in Mb\": 0.0036268234252929, \"Time in s\": 4.908346000000001 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548758392838, \"MicroF1\": 0.997548758392838, \"MacroF1\": 0.997542651662671, \"Memory in Mb\": 0.0036773681640625, \"Time in s\": 5.3453800000000005 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487692779084, \"MicroF1\": 0.9975487692779084, \"MacroF1\": 0.9975454987794794, \"Memory in Mb\": 0.003727912902832, \"Time in s\": 5.8012950000000005 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487792920874, \"MicroF1\": 0.9975487792920874, \"MacroF1\": 0.9975477757646256, \"Memory in Mb\": 0.0037784576416015, \"Time in s\": 6.275930000000001 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487885358726, \"MicroF1\": 0.9975487885358726, \"MacroF1\": 0.9975495850737114, \"Memory in Mb\": 0.003829002380371, \"Time in s\": 6.769232000000001 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487970948708, \"MicroF1\": 0.9975487970948708, \"MacroF1\": 0.997551008926056, \"Memory in Mb\": 0.0038795471191406, \"Time in s\": 7.281090000000001 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488050424582, \"MicroF1\": 0.9975488050424582, \"MacroF1\": 0.997552113761348, \"Memory in Mb\": 0.0039300918579101, \"Time in s\": 7.811594 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.99754881244189, \"MicroF1\": 0.99754881244189, \"MacroF1\": 0.9975529536110198, \"Memory in Mb\": 0.0039806365966796, \"Time in s\": 8.360849 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.997548819347986, \"MicroF1\": 0.997548819347986, \"MacroF1\": 0.9975535726732964, \"Memory in Mb\": 0.0040311813354492, \"Time in s\": 8.928801 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548825808492, \"MicroF1\": 0.997548825808492, \"MacroF1\": 0.9975540072976318, \"Memory in Mb\": 0.0040817260742187, \"Time in s\": 9.515298 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488318651856, \"MicroF1\": 0.9975488318651856, \"MacroF1\": 0.997554287526727, \"Memory in Mb\": 0.0041322708129882, \"Time in s\": 10.120335 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488375547796, \"MicroF1\": 0.9975488375547796, \"MacroF1\": 0.9975544383040468, \"Memory in Mb\": 0.0041828155517578, \"Time in s\": 10.744063 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488429096676, \"MicroF1\": 0.9975488429096676, \"MacroF1\": 0.9975544804262364, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 11.386615999999998 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488479585404, \"MicroF1\": 0.9975488479585404, \"MacroF1\": 0.9975544312994103, \"Memory in Mb\": 0.0042839050292968, \"Time in s\": 12.048032999999998 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488527269012, \"MicroF1\": 0.9975488527269012, \"MacroF1\": 0.997554305543504, \"Memory in Mb\": 0.0043344497680664, \"Time in s\": 12.728291999999998 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548857237496, \"MicroF1\": 0.997548857237496, \"MacroF1\": 0.9975541154780816, \"Memory in Mb\": 0.0043849945068359, \"Time in s\": 13.427186999999998 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488615106752, \"MicroF1\": 0.9975488615106752, \"MacroF1\": 0.9975538715150368, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 14.144781999999998 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488655647036, \"MicroF1\": 0.9975488655647036, \"MacroF1\": 0.997553582477696, \"Memory in Mb\": 0.004486083984375, \"Time in s\": 14.881121999999998 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488694160182, \"MicroF1\": 0.9975488694160182, \"MacroF1\": 0.997553255861403, \"Memory in Mb\": 0.0045366287231445, \"Time in s\": 15.636483999999998 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488730794524, \"MicroF1\": 0.9975488730794524, \"MacroF1\": 0.997552898047314, \"Memory in Mb\": 0.004587173461914, \"Time in s\": 16.410486 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", 
\"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488765684272, \"MicroF1\": 0.9975488765684272, \"MacroF1\": 0.997552514478575, \"Memory in Mb\": 0.0046377182006835, \"Time in s\": 17.203509999999998 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488798951148, \"MicroF1\": 0.9975488798951148, \"MacroF1\": 0.997552109806108, \"Memory in Mb\": 0.0046882629394531, \"Time in s\": 18.015261 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548883070581, \"MicroF1\": 0.997548883070581, \"MacroF1\": 0.997551688009728, \"Memory in Mb\": 0.0047388076782226, \"Time in s\": 18.845896 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488861049076, \"MicroF1\": 0.9975488861049076, \"MacroF1\": 0.9975512524991372, \"Memory in Mb\": 0.0047893524169921, \"Time in s\": 19.695493 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488890073, \"MicroF1\": 0.9975488890073, \"MacroF1\": 0.9975508061984416, \"Memory in Mb\": 0.0048398971557617, \"Time in s\": 20.563922 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.99754889178618, \"MicroF1\": 0.99754889178618, \"MacroF1\": 0.9975503516171184, \"Memory in Mb\": 0.0048904418945312, \"Time in s\": 21.451134 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488944492672, \"MicroF1\": 0.9975488944492672, \"MacroF1\": 0.997549890909789, \"Memory in Mb\": 0.0049409866333007, \"Time in s\": 22.357399 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488970036516, \"MicroF1\": 0.9975488970036516, \"MacroF1\": 0.9975494259267256, \"Memory in Mb\": 0.0049915313720703, \"Time in s\": 23.282656 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488994558556, \"MicroF1\": 0.9975488994558556, \"MacroF1\": 0.9975489582566448, \"Memory in Mb\": 0.0050420761108398, \"Time in s\": 24.227046 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"ImageSegments\", \"bind\": { \"input\": \"select\", \"options\": [ \"ImageSegments\", \"Insects\", \"Keystroke\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": \"Dataset\" } } } ], \"repeat\": { \"row\": [ \"Accuracy\", \"MicroF1\", \"MacroF1\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, 
\"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Multiclass%20classification/#datasets","title":"Datasets","text":"ImageSegments

    Image segments classification.

    This dataset contains features describing image segments, each of which belongs to one of 7 classes: brickface, sky, foliage, cement, window, path, and grass.

    Name      ImageSegments
    Task      Multi-class classification
    Samples   2,310
    Features  18
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/segment.csv.zip
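
    As a rough illustration, the dataset can be streamed one sample at a time with river's datasets module (a minimal sketch; datasets.ImageSegments is the class river exposes for this dataset):

        from river import datasets

        # Stream the 2,310 image segments one at a time.
        dataset = datasets.ImageSegments()
        for x, y in dataset:
            # x is a dict with the 18 features, y is one of the 7 class labels.
            print(y)
            break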

    Insects

    Insects dataset.

    This dataset has different variants, which are:

    • abrupt_balanced
    • abrupt_imbalanced
    • gradual_balanced
    • gradual_imbalanced
    • incremental-abrupt_balanced
    • incremental-abrupt_imbalanced
    • incremental-reoccurring_balanced
    • incremental-reoccurring_imbalanced
    • incremental_balanced
    • incremental_imbalanced
    • out-of-control

    The number of samples and the difficulty vary from one variant to another. The number of classes is always the same (6), except for the out-of-control variant, which has 24.

    Name        Insects
    Task        Multi-class classification
    Samples     52,848
    Features    33
    Classes     6
    Sparse      False
    Path        /home/kulbach/river_data/Insects/INSECTS-abrupt_balanced_norm.arff
    URL         http://sites.labic.icmc.usp.br/vsouza/repository/creme/INSECTS-abrupt_balanced_norm.arff
    Size        15.66 MB
    Downloaded  True
    Variant     abrupt_balanced

    Keystroke

    CMU keystroke dataset.

    Users are tasked with typing in a password; the goal is to determine which user is typing it.

    The only difference from the original dataset is that the \"sessionIndex\" and \"rep\" attributes have been dropped.

    Name        Keystroke
    Task        Multi-class classification
    Samples     20,400
    Features    31
    Sparse      False
    Path        /home/kulbach/river_data/Keystroke/DSL-StrongPasswordData.csv
    URL         http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv
    Size        4.45 MB
    Downloaded  True
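
    A minimal sketch of streaming this dataset (datasets.Keystroke is river's class for it; the file is downloaded on first use):

        from river import datasets

        dataset = datasets.Keystroke()
        x, y = next(iter(dataset))
        # x holds the 31 typing-rhythm features, y identifies the typing user.
        print(y)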

    "},{"location":"benchmarks/Multiclass%20classification/#parameters","title":"Parameters","text":"
    variant
        Indicates which variant of the dataset to load.
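
    For instance, to load one of the variants listed above (a sketch; the variant names are those given in the dataset description):

        from river import datasets

        # Any of the variant names listed above can be passed here.
        dataset = datasets.Insects(variant="abrupt_balanced")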
    "},{"location":"benchmarks/Multiclass%20classification/#models","title":"Models","text":"Naive Bayes

    GaussianNB ()

    Hoeffding Tree

    HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)
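
    The dump above is the model's printed configuration; a minimal sketch of building the same model by hand (parameters not passed keep the defaults shown in the dump):

        from river import tree

        model = tree.HoeffdingTreeClassifier(
            grace_period=200,   # samples a leaf observes between split attempts
            delta=1e-07,        # significance level of the Hoeffding bound
            tau=0.05,           # tie-breaking threshold
            leaf_prediction="nba",
        )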

    Hoeffding Adaptive Tree

    HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=True\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=42\n)

    Adaptive Random Forest

    []

    Streaming Random Patches

    SRPClassifier (\n  model=HoeffdingTreeClassifier (\n    grace_period=50\n    max_depth=inf\n    split_criterion=\"info_gain\"\n    delta=0.01\n    tau=0.05\n    leaf_prediction=\"nba\"\n    nb_threshold=0\n    nominal_attributes=None\n    splitter=GaussianSplitter (\n      n_splits=10\n    )\n    binary_split=False\n    max_size=100.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n  n_models=10\n  subspace_size=0.6\n  training_method=\"patches\"\n  lam=6\n  drift_detector=ADWIN (\n    delta=1e-05\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  warning_detector=ADWIN (\n    delta=0.0001\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  disable_detector=\"off\"\n  disable_weighted_vote=False\n  seed=None\n  metric=Accuracy (\n    cm=ConfusionMatrix (\n      classes=[]\n    )\n  )\n)

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)\n
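
    The scaler-plus-KNN pipeline above can be assembled with river's | operator (a minimal sketch, keeping default parameters beyond n_neighbors):

        from river import neighbors, preprocessing

        # StandardScaler feeds standardized features into the KNN classifier.
        model = preprocessing.StandardScaler() | neighbors.KNNClassifier(n_neighbors=5)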

    ADWIN Bagging

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]


    AdaBoost

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]


    Bagging

    [HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n  
  clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n)]


    Leveraging Bagging

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]


    Stacking

    [Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SoftmaxRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=CrossEntropy (\n      class_weight={}\n    )\n    l2=0\n  )\n), GaussianNB (), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)]\n

    Voting

    VotingClassifier (\n  models=[Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SoftmaxRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=CrossEntropy (\n      class_weight={}\n    )\n    l2=0\n  )\n), GaussianNB (), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)]\n  use_probabilities=True\n)\n

    [baseline] Last Class

    NoChangeClassifier ()
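
    This baseline simply repeats the most recently seen class label and ignores the features; a minimal sketch:

        from river import dummy

        model = dummy.NoChangeClassifier()
        model.learn_one({}, "sky")
        # Predicts the last label it was shown.
        print(model.predict_one({}))  # "sky"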


    "},{"location":"benchmarks/Multiclass%20classification/#environment","title":"Environment","text":"
    Python implementation: CPython
    Python version       : 3.11.5
    IPython version      : 8.15.0

    river       : 0.19.0
    numpy       : 1.25.2
    scikit-learn: 1.3.0
    pandas      : 2.1.0
    scipy       : 1.11.2

    Compiler    : GCC 11.4.0
    OS          : Linux
    Release     : 5.15.0-1041-azure
    Machine     : x86_64
    Processor   : x86_64
    CPU cores   : 2
    Architecture: 64bit
    "},{"location":"benchmarks/Regression/","title":"Regression","text":"TableChart Model Dataset MAE RMSE R2 Memory in Mb Time in s Adaptive Model Rules ChickWeights 24.0925 37.1369 0.719675 0.0469542 5.03028 Adaptive Model Rules TrumpApproval 1.40204 2.43644 -1.02749 0.114429 5.76779 Adaptive Random Forest ChickWeights 25.9648 40.6034 0.6649 1.18613 32.8286 Adaptive Random Forest TrumpApproval 0.801133 2.11603 -0.529292 1.28362 54.6942 Bagging ChickWeights 23.0595 36.5862 0.727928 0.643575 19.8658 Bagging TrumpApproval 0.904415 2.23483 -0.705833 1.33501 42.6904 Exponentially Weighted Average ChickWeights 120.54 139.462 -2.95334 0.183387 12.3806 Exponentially Weighted Average TrumpApproval 40.7536 40.7895 -567.257 0.316642 30.0432 Hoeffding Adaptive Tree ChickWeights 23.2557 37.579 0.712962 0.0946112 5.75782 Hoeffding Adaptive Tree TrumpApproval 0.910675 2.2343 -0.705019 0.138225 6.69917 Hoeffding Tree ChickWeights 23.0842 36.6638 0.726773 0.0440512 4.02236 Hoeffding Tree TrumpApproval 0.949745 2.24815 -0.726224 0.148639 9.13796 Linear Regression ChickWeights 23.8353 37.0287 0.721307 0.00421047 2.10647 Linear Regression TrumpApproval 1.3486 4.12828 -4.82084 0.00497341 3.6327 Linear Regression with l1 regularization ChickWeights 23.868 37.0773 0.720575 0.00444126 1.13401 Linear Regression with l1 regularization TrumpApproval 1.21585 4.06821 -4.65269 0.0052042 2.06156 Linear Regression with l2 regularization ChickWeights 25.5204 38.6553 0.696284 0.00423336 1.11618 Linear Regression with l2 regularization TrumpApproval 1.99918 4.40997 -5.64232 0.0049963 1.98704 Passive-Aggressive Regressor, mode 1 ChickWeights 24.2339 37.5576 0.713289 0.00345898 1.33977 Passive-Aggressive Regressor, mode 1 TrumpApproval 4.90639 6.6656 -14.1749 0.00443554 2.18425 Passive-Aggressive Regressor, mode 2 ChickWeights 99.5681 141.4 -3.06396 0.00345898 1.99155 Passive-Aggressive Regressor, mode 2 TrumpApproval 31.1288 34.4257 -403.774 0.00443554 2.19594 River MLP ChickWeights 49.5783 77.9026 -0.233541 0.0123129 18.4913 River MLP TrumpApproval 1.59139 5.147 -8.04808 0.0133505 30.7873 Stochastic Gradient Tree ChickWeights 68.1198 79.5649 -0.286746 1.12059 9.48214 Stochastic Gradient Tree TrumpApproval 9.43874 17.9468 -109.008 3.08244 24.6638 Streaming Random Patches ChickWeights 23.5162 38.2072 0.703285 0.558536 50.7829 Streaming Random Patches TrumpApproval 0.640561 1.97134 -0.32731 1.05934 101.873 [baseline] Mean predictor ChickWeights 49.4914 70.2457 -0.00297194 0.000490189 0.529127 [baseline] Mean predictor TrumpApproval 1.56814 2.20374 -0.658701 0.000490189 0.8379 k-Nearest Neighbors ChickWeights 22.9043 34.7945 0.753924 0.0461216 4.35991 k-Nearest Neighbors TrumpApproval 0.493975 1.50807 0.223232 0.0660038 9.48546

Try reloading the page if the charts do not render properly.
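The chart data below tracks the same metrics at periodic checkpoints (every 11 samples for ChickWeights, every 20 for TrumpApproval). A minimal sketch of producing such per-step traces, assuming river's evaluate.iter_progressive_val_score:

```python
from river import datasets, evaluate, linear_model, metrics, preprocessing

model = preprocessing.StandardScaler() | linear_model.LinearRegression()
metric = metrics.MAE() + metrics.RMSE() + metrics.R2()

# Yields the running metrics (plus a "Step" counter) every `step` samples,
# which is what the per-step series plotted in the chart represent.
for checkpoint in evaluate.iter_progressive_val_score(
    dataset=datasets.TrumpApproval(),
    model=model,
    metric=metric,
    step=20,
):
    print(checkpoint)
```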

    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 32.364564569910335, \"RMSE\": 32.97872020361878, \"R2\": -1398.9905780691188, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.003051 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.977933628105813, \"RMSE\": 25.38362603225939, \"R2\": -681.3960169454474, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.0083 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 16.216942910977988, \"RMSE\": 20.82463881551788, \"R2\": -300.18738429635704, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.014943 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.450847696587651, \"RMSE\": 18.04722398474583, \"R2\": -255.42929659358052, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.023007 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.8883407017882, \"RMSE\": 18.699705575978975, \"R2\": -67.26141846932143, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.032524 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.481406471145082, \"RMSE\": 17.562600262725994, \"R2\": -24.95549321582236, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.043462 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 10.781108661026623, \"RMSE\": 16.493572764286025, \"R2\": -14.34295652053857, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.055844 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.717703273355898, \"RMSE\": 15.46585610846664, \"R2\": -11.231382330967593, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.069657 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.826979124235404, \"RMSE\": 14.601347274688614, \"R2\": -8.118374730562003, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.084902 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.34720326953035, \"RMSE\": 13.931298318002057, \"R2\": -4.796525071049026, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.101965 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.037877082888846, \"RMSE\": 13.41080638289134, \"R2\": -3.136902586442697, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.120474 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.656433924417384, \"RMSE\": 12.898278689410905, \"R2\": -2.1275837609073576, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.140407 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.307942088554156, \"RMSE\": 12.437137940834392, \"R2\": -1.3553409371460328, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.161783 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression\", 
\"dataset\": \"ChickWeights\", \"MAE\": 7.037714222368383, \"RMSE\": 12.042115748312936, \"R2\": -0.8765797740197239, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.184594 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.129031762481882, \"RMSE\": 11.913307711374014, \"R2\": -0.4764258010150459, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.208855 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.184514897799321, \"RMSE\": 11.77636646389892, \"R2\": -0.1632359842489146, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.23453 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.115123062484693, \"RMSE\": 11.572523949602724, \"R2\": 0.0802677819230903, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.261536 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.006474290899419, \"RMSE\": 11.366304822809298, \"R2\": 0.2942397460202306, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.289991 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.129008217053805, \"RMSE\": 11.440940870898142, \"R2\": 0.4105268603250641, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.319876 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.62421928608864, \"RMSE\": 12.045617517752785, \"R2\": 0.4279229250366536, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.351191 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.729844807682863, \"RMSE\": 12.068921171072352, \"R2\": 0.5087708730950672, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.383941 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.873703200353374, \"RMSE\": 12.23342730557754, \"R2\": 0.5938836560989084, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.4181169999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.894340397324045, \"RMSE\": 12.218207932001455, \"R2\": 0.6481596201604607, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.4537249999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.479294890454037, \"RMSE\": 13.126132095776898, \"R2\": 0.6289858847198173, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.4907699999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.914096443559163, \"RMSE\": 13.971715828104037, \"R2\": 0.6301108693673194, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.529253 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.123963222012373, \"RMSE\": 14.305597328390173, \"R2\": 0.6641373552910966, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.569186 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.083791720841957, \"RMSE\": 14.24670706195338, \"R2\": 0.7111028643570333, \"Memory in Mb\": 
0.0042104721069335, \"Time in s\": 0.610599 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.589205789771716, \"RMSE\": 14.956254664628933, \"R2\": 0.716435491318643, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.653398 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 10.6480954226875, \"RMSE\": 17.335456678654833, \"R2\": 0.654294294845865, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.697581 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.061417554605155, \"RMSE\": 17.89416376383148, \"R2\": 0.6847745473646168, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.743241 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.240970714084437, \"RMSE\": 17.96809449059472, \"R2\": 0.7153933828209167, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.7902760000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.393007763406809, \"RMSE\": 18.07679096199219, \"R2\": 0.7381404893604309, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.8386990000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.251680566634816, \"RMSE\": 19.3891577397662, \"R2\": 0.7074601934283691, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.8885620000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.75183556333798, \"RMSE\": 20.473547618215623, \"R2\": 0.7001526953506461, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.93984 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 13.120977867369843, \"RMSE\": 21.06680160073653, \"R2\": 0.7191139726408686, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.992561 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 13.243904830041805, \"RMSE\": 21.04850718241465, \"R2\": 0.7385587649833809, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.046701 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 14.114140715648691, \"RMSE\": 22.50284796635845, \"R2\": 0.7222415724766076, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.102244 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 14.877176135328032, \"RMSE\": 23.91912678123439, \"R2\": 0.7054123344015044, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.159239 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 15.420211528669606, \"RMSE\": 24.826921056607983, \"R2\": 0.71797392321154, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.2176479999999998 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 15.588380621816588, \"RMSE\": 24.89946727120753, \"R2\": 0.7364018543257719, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.277456 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": 
\"ChickWeights\", \"MAE\": 16.102138383202178, \"RMSE\": 25.5012042182244, \"R2\": 0.73526123694725, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.338723 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 17.19666374070754, \"RMSE\": 27.602141070792264, \"R2\": 0.7086782414730581, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.4014049999999998 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 17.97145397683086, \"RMSE\": 28.90516312323801, \"R2\": 0.7179019616037816, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.4654839999999998 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 18.29792978215437, \"RMSE\": 29.184271659667463, \"R2\": 0.728186505594778, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.531041 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 18.74962346435519, \"RMSE\": 29.70957841185893, \"R2\": 0.7350194821983969, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.597992 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 19.63242676502778, \"RMSE\": 31.145843529930996, \"R2\": 0.717244444772776, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.666343 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 20.352340621675207, \"RMSE\": 32.13418072986834, \"R2\": 0.7159654376794024, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.736162 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 21.13777985928475, \"RMSE\": 33.324214910779105, \"R2\": 0.7253645356808669, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.807369 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 21.30552841968683, \"RMSE\": 33.32197733500869, \"R2\": 0.7367405849979859, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.880014 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.28842093535661, \"RMSE\": 34.93191609140748, \"R2\": 0.7196038878445231, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.954091 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.98790385213596, \"RMSE\": 35.84862508987654, \"R2\": 0.7176082524890277, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 2.029559 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 23.835304128485923, \"RMSE\": 37.028707868367256, \"R2\": 0.7213067136137974, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 2.106474 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.20376765378399, \"RMSE\": 26.086393589237737, \"R2\": -1595.1823041445402, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.006002 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.037845165976735, \"RMSE\": 19.010285970857197, \"R2\": -144.292318589198, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 
0.015135 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.507970876430278, \"RMSE\": 16.25440462414082, \"R2\": -142.20309184289852, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.026737 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.557907896578074, \"RMSE\": 14.248619966820993, \"R2\": -109.38231735939875, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.04106 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.00119890237694, \"RMSE\": 12.784639272000032, \"R2\": -54.757167221204185, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.058228 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.9642139928158, \"RMSE\": 11.706689332840265, \"R2\": -38.660847151370525, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.078171 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.158017211594616, \"RMSE\": 10.855926078196225, \"R2\": -34.244125473921144, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.100823 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.477712824897756, \"RMSE\": 10.159717829752896, \"R2\": -26.22221848793916, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.126265 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.024407839120485, \"RMSE\": 9.597258286357787, \"R2\": -20.33422740878964, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.154466 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.585662202332267, \"RMSE\": 9.108145701438088, \"R2\": -18.272229834363905, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.185533 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.26060909213789, \"RMSE\": 8.692057179629266, \"R2\": -17.933082537971817, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.219335 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.9717866152166015, \"RMSE\": 8.326248244302885, \"R2\": -16.503720237291063, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.255864 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.713770572650404, \"RMSE\": 8.00217875002923, \"R2\": -15.385557669694744, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.2951479999999999 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.519033617242816, \"RMSE\": 7.718418241237259, \"R2\": -14.960370233444367, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.3371889999999999 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3459125962612686, \"RMSE\": 7.4642342223287805, \"R2\": -13.679347912302555, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.382005 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 
3.2142611116185447, \"RMSE\": 7.238080925352425, \"R2\": -13.486769876410833, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.429554 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.0579195410777067, \"RMSE\": 7.023783188903098, \"R2\": -13.41588572736102, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.479826 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.945682332324278, \"RMSE\": 6.834004497968132, \"R2\": -12.75946181118139, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.532818 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.834623050495076, \"RMSE\": 6.655478314361804, \"R2\": -12.501407484394289, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.588545 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.750580257859316, \"RMSE\": 6.492898516140861, \"R2\": -12.2130072039923, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.647075 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6430441633874877, \"RMSE\": 6.337629923196658, \"R2\": -12.005235448499764, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.708317 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.55209658354648, \"RMSE\": 6.194505226365406, \"R2\": -11.19965041203295, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.7723099999999999 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.456089002686458, \"RMSE\": 6.059000096335146, \"R2\": -10.068304379054997, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.8388639999999999 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.366054305985814, \"RMSE\": 5.93188196569365, \"R2\": -9.364683952709628, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.907942 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2878529832492878, \"RMSE\": 5.812913918334153, \"R2\": -8.7442989221461, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.979676 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2263077878678064, \"RMSE\": 5.701877933590318, \"R2\": -8.391958889485423, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.054282 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.159275760054389, \"RMSE\": 5.596308740310266, \"R2\": -8.01424271274666, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.13157 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.121286703179314, \"RMSE\": 5.500929056902255, \"R2\": -7.917124287747498, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.211538 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.058130800812745, \"RMSE\": 5.4056516105350205, \"R2\": -7.823875188349783, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.294212 }, { 
\"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.010772210317983, \"RMSE\": 5.316298216027806, \"R2\": -7.440165070210115, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.379521 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9712240547218984, \"RMSE\": 5.232121316388296, \"R2\": -7.05039272692726, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.467481 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918906116628168, \"RMSE\": 5.150155111235484, \"R2\": -6.654334565315682, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.558 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.87787066207154, \"RMSE\": 5.072802363597129, \"R2\": -6.372735616761029, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.651027 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8268195769848845, \"RMSE\": 4.997758130035794, \"R2\": -6.2693000939147145, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.746732 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.786028440025988, \"RMSE\": 4.9266674679383895, \"R2\": -6.249499636750513, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.84509 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7407901109286728, \"RMSE\": 4.857855812572241, \"R2\": -6.20325679847918, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.946268 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6980813320245582, \"RMSE\": 4.791872643159282, \"R2\": -6.00468916158508, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.050093 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6641755726043943, \"RMSE\": 4.729168490426344, \"R2\": -5.896482494172518, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.156434 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6304504193200038, \"RMSE\": 4.6685949390255965, \"R2\": -5.751055148180852, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.265329 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6024144632936517, \"RMSE\": 4.610765602340218, \"R2\": -5.644394777378336, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.376883 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5772046524793362, \"RMSE\": 4.555342563217192, \"R2\": -5.556843815680599, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.490967 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5504230286371177, \"RMSE\": 4.501494961913348, \"R2\": -5.462190804899245, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.607626 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5213504760602443, \"RMSE\": 4.449264316210896, 
\"R2\": -5.302224210455831, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.72689 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4920620594434295, \"RMSE\": 4.398585386750051, \"R2\": -5.128866413959423, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.848697 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4631535468073946, \"RMSE\": 4.3495699730724, \"R2\": -5.018290064232882, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.973079 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4376774845864433, \"RMSE\": 4.302379067062498, \"R2\": -4.985157602999735, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.100144 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.415413283450155, \"RMSE\": 4.2569033587476754, \"R2\": -4.909007968017405, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.229628 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3929189597034646, \"RMSE\": 4.212790914002432, \"R2\": -4.847686152244137, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.361574 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3689716642677323, \"RMSE\": 4.1697584324400925, \"R2\": -4.840094251784054, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.49595 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.348598310616665, \"RMSE\": 4.128277744647548, \"R2\": -4.8208398605179, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.6327 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 32.42675747760146, \"RMSE\": 33.032143455333795, \"R2\": -1403.530028209614, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.002172 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.11671120534681, \"RMSE\": 25.467535638550565, \"R2\": -685.9150105173057, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.005938 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.40645850052153, \"RMSE\": 20.90890407573329, \"R2\": -302.6297778360383, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.010657 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.633013937743208, \"RMSE\": 18.123648450153382, \"R2\": -257.60569409037487, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.016312 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.09340740686418, \"RMSE\": 18.75532087846616, \"R2\": -67.66805855020911, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.022822 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.723217014070244, \"RMSE\": 17.64468538999345, \"R2\": -25.19868487320792, \"Memory in 
Mb\": 0.0043611526489257, \"Time in s\": 0.030046 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.995180265837302, \"RMSE\": 16.56912334002292, \"R2\": -14.483838555564008, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.037972 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.883907021821408, \"RMSE\": 15.530287677810511, \"R2\": -11.333507781967656, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.046598 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.972176235897166, \"RMSE\": 14.66146594146288, \"R2\": -8.193616152032533, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.055927 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.50780758363674, \"RMSE\": 13.99831063395296, \"R2\": -4.852424068253989, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.066187 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.189643323772653, \"RMSE\": 13.479618530659062, \"R2\": -3.1794651999709123, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.077165 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.801679323915957, \"RMSE\": 12.961634417305982, \"R2\": -2.158384304610562, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.088845 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.451861020480359, \"RMSE\": 12.498785048420814, \"R2\": -1.3787482214976663, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.101232 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.149646459280303, \"RMSE\": 12.093459492377487, \"R2\": -0.8926161646086501, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.114319 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.229782522017506, \"RMSE\": 11.96532542528415, \"R2\": -0.4893471433346175, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.128105 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.272512426798904, \"RMSE\": 11.818048782353436, \"R2\": -0.1714850789711801, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1426069999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.196746625780547, \"RMSE\": 11.610365671998538, \"R2\": 0.0742429673312855, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1578139999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.1064479168500405, \"RMSE\": 11.42347112116664, \"R2\": 0.2871227171728154, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.173728 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 
regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.187961051781539, \"RMSE\": 11.470757896418933, \"R2\": 0.4074503232991815, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1903399999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.669011107337858, \"RMSE\": 12.056664202246258, \"R2\": 0.426873173509426, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2076519999999999 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.786810512364482, \"RMSE\": 12.097059810994589, \"R2\": 0.5064776054701067, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2256619999999999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.967587416991401, \"RMSE\": 12.312376354870244, \"R2\": 0.5886249568088757, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2443719999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.942191618805254, \"RMSE\": 12.251768500136135, \"R2\": 0.6462241186586895, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2637739999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.532657015260138, \"RMSE\": 13.159069559279288, \"R2\": 0.6271215737447722, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.2838889999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.974527826218258, \"RMSE\": 14.016709692996267, \"R2\": 0.6277246858047626, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3047049999999999 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.187875132430849, \"RMSE\": 14.367497338174372, \"R2\": 0.6612245262436964, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3262349999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.146460078204475, \"RMSE\": 14.316362398212211, \"R2\": 0.7082709930315267, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3484709999999999 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.641370323857412, \"RMSE\": 15.001693346690402, \"R2\": 0.7147098761119641, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3714179999999999 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.700825117113602, \"RMSE\": 17.38383679543193, \"R2\": 0.6523619983610591, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3950729999999999 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.121905143066762, \"RMSE\": 17.96551370253039, \"R2\": 0.6822557197544588, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4194319999999999 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": 
\"ChickWeights\", \"MAE\": 11.300130820443067, \"RMSE\": 18.038310133249198, \"R2\": 0.7131646675929553, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4444929999999999 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.446939695127732, \"RMSE\": 18.13441953669637, \"R2\": 0.7364682185789984, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4702589999999999 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.306713664516929, \"RMSE\": 19.446501901626007, \"R2\": 0.7057272396553849, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4967349999999999 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.8047145381729, \"RMSE\": 20.530886427306594, \"R2\": 0.6984708214392588, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.523921 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.174415073298738, \"RMSE\": 21.133761140382827, \"R2\": 0.7173255769005842, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.551805 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.2883642577537, \"RMSE\": 21.10340115690396, \"R2\": 0.7371933225224319, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.580392 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.156717848187574, \"RMSE\": 22.549679209142333, \"R2\": 0.7210842693854467, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6096860000000001 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.91944953335544, \"RMSE\": 23.967687063528587, \"R2\": 0.7042149845564116, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6396860000000001 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.467166242517186, \"RMSE\": 24.886955839016704, \"R2\": 0.7166083213097005, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6704000000000001 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.631989433801651, \"RMSE\": 24.954278611820005, \"R2\": 0.735240056765428, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.7018220000000001 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.140858755557055, \"RMSE\": 25.549476814516595, \"R2\": 0.7342580119275103, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.7339490000000001 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.234115417053438, \"RMSE\": 27.64913352119068, \"R2\": 0.7076854506057617, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.766781 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.0152369206823, \"RMSE\": 28.967470484053976, 
\"R2\": 0.7166844816496443, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.800319 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.33784663215877, \"RMSE\": 29.23326362096376, \"R2\": 0.727273146935064, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.834558 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.785497932712666, \"RMSE\": 29.755293652524703, \"R2\": 0.7342033839127224, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.86951 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.668627686193567, \"RMSE\": 31.19471320280161, \"R2\": 0.7163564282233008, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.90517 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.38947136061499, \"RMSE\": 32.18441644636668, \"R2\": 0.7150766748346216, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.941543 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.174247281712567, \"RMSE\": 33.375214332088184, \"R2\": 0.7245232875146275, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.978621 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.335113723003413, \"RMSE\": 33.369412847265615, \"R2\": 0.7359905254430201, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.016405 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.319908918673335, \"RMSE\": 34.98038586656285, \"R2\": 0.7188252208291055, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.054903 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.02102944586984, \"RMSE\": 35.899425045778656, \"R2\": 0.7168073485461736, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.094104 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.86795045847124, \"RMSE\": 37.077313876148, \"R2\": 0.7205745757906983, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.134008 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.431235633428948, \"RMSE\": 26.218144216470428, \"R2\": -1611.3462157506035, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.004147 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.103513545807008, \"RMSE\": 19.087149489688155, \"R2\": -145.46960290724672, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.010783 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.461367198760032, \"RMSE\": 16.23222714650599, \"R2\": -141.81258639462544, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.018889 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear 
Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.44127715126052, \"RMSE\": 14.201674320759096, \"R2\": -108.65615123709478, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.028452 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.797022024035496, \"RMSE\": 12.721240349043525, \"R2\": -54.205539694816935, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.039551 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.691147023027369, \"RMSE\": 11.629122321270428, \"R2\": -38.13701304656029, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.052122 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.831783229321132, \"RMSE\": 10.770680162674168, \"R2\": -33.69279126993988, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.066155 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.153144449790517, \"RMSE\": 10.076741978726949, \"R2\": -25.77937886024605, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.08166 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.653806702754108, \"RMSE\": 9.504457154537077, \"R2\": -19.92363756907144, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.098629 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.222103651464603, \"RMSE\": 9.017588926226493, \"R2\": -17.890910704295415, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.117152 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.8867321432249606, \"RMSE\": 8.600346446062781, \"R2\": -17.535660715889627, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.137154 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.599340946609919, \"RMSE\": 8.235553698215059, \"R2\": -16.124474758948196, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.15861 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3420538385748757, \"RMSE\": 7.912854669167724, \"R2\": -15.021792727856036, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.181525 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.121876705697044, \"RMSE\": 7.625390851691296, \"R2\": -14.577959242562436, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.205897 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9443386382725367, \"RMSE\": 7.3683114276631025, \"R2\": -13.30448388815742, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.231796 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.815755936756583, \"RMSE\": 7.138230019587049, \"R2\": 
-13.089830522149688, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.259173 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6933833627330244, \"RMSE\": 6.928597052658441, \"R2\": -13.027805837336182, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.288012 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6051143953909115, \"RMSE\": 6.739167122054826, \"R2\": -12.380223856378365, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.318313 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.490520088621087, \"RMSE\": 6.560408055638074, \"R2\": -12.118440379947032, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.350072 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.379600080775968, \"RMSE\": 6.3947378153227445, \"R2\": -11.816514349639151, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.383345 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2783211278818065, \"RMSE\": 6.240996339547436, \"R2\": -11.61166202267384, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.418092 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2145782025697205, \"RMSE\": 6.101000859547544, \"R2\": -10.83412932108482, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.454298 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1464112051078263, \"RMSE\": 5.968705291729145, \"R2\": -9.740869666058233, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.491959 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.072454283271577, \"RMSE\": 5.843773480387036, \"R2\": -9.059069500744918, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.5310900000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9987939253418097, \"RMSE\": 5.726023081885353, \"R2\": -8.45516263823347, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.5716870000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9284894530733132, \"RMSE\": 5.614979238753044, \"R2\": -8.107866604370331, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.613828 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8708966196370944, \"RMSE\": 5.510640598064868, \"R2\": -7.740375491210358, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.657455 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824494929332068, \"RMSE\": 5.412971505312132, \"R2\": -7.634241934578528, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.702504 }, { \"step\": 580, \"track\": 
\"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.781951127238344, \"RMSE\": 5.3203622791847724, \"R2\": -7.547628985954546, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.748994 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.749732390051898, \"RMSE\": 5.233421242207161, \"R2\": -7.179064952823026, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.796981 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7052484400139905, \"RMSE\": 5.14921027958835, \"R2\": -6.797272491389374, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.846385 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6664817162996513, \"RMSE\": 5.068963198399273, \"R2\": -6.414896594696412, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.897251 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6214900718153242, \"RMSE\": 4.99171743637858, \"R2\": -6.138924064419633, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.949568 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5818233283584242, \"RMSE\": 4.918048415051679, \"R2\": -6.03927170869104, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.003319 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.555259840641907, \"RMSE\": 4.848763195223404, \"R2\": -6.02204294951599, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.058593 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.532054525069809, \"RMSE\": 4.783346185353484, \"R2\": -5.983984772166844, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.1153050000000002 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5146024654094865, \"RMSE\": 4.7215191517857305, \"R2\": -5.800515662606876, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.1734340000000003 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4818289913711118, \"RMSE\": 4.659306491272199, \"R2\": -5.694229888242945, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.2330550000000002 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.452402157072035, \"RMSE\": 4.59956290700914, \"R2\": -5.552882667907455, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.294088 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4293016341621465, \"RMSE\": 4.542852809132106, \"R2\": -5.450103306475059, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.356613 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 
1.4031025761620075, \"RMSE\": 4.487566758646917, \"R2\": -5.363185739412862, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.420603 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.381369306663701, \"RMSE\": 4.43470615373713, \"R2\": -5.271853959037296, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.48601 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3592027324075535, \"RMSE\": 4.3834303912536, \"R2\": -5.11710119981194, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.552867 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3365452172324814, \"RMSE\": 4.3337346544721145, \"R2\": -4.949476248649982, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.621167 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.309932841675593, \"RMSE\": 4.285399956971287, \"R2\": -4.842022075269781, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.690888 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.286195364366444, \"RMSE\": 4.238743982476022, \"R2\": -4.809417914789597, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.762146 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.270754149650798, \"RMSE\": 4.194574036632911, \"R2\": -4.737236102715975, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.834847 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.252007480522268, \"RMSE\": 4.151199175197265, \"R2\": -4.677947706919012, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.908959 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2327553756112288, \"RMSE\": 4.10900051997898, \"R2\": -4.671141158917876, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.984552 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2158544056174447, \"RMSE\": 4.0682125513101886, \"R2\": -4.652689174637866, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 2.061558 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 32.49979765090093, \"RMSE\": 33.085767570527814, \"R2\": -1408.093935143081, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.00221 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.548763427243948, \"RMSE\": 25.711783397814365, \"R2\": -699.1539821884553, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.0060149999999999 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.994606748791693, \"RMSE\": 21.16216382986949, \"R2\": -310.0297747454571, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.010773 }, { 
\"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.434663043069094, \"RMSE\": 18.386175360023177, \"R2\": -265.1519301746234, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.016452 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.685726760044922, \"RMSE\": 18.64479618798502, \"R2\": -66.86112450289035, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.023006 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.3059577367172, \"RMSE\": 17.58876192176611, \"R2\": -25.03287862681572, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.030252 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.844694328458631, \"RMSE\": 16.6807536659431, \"R2\": -14.693178357367543, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.03818 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.154725738202892, \"RMSE\": 15.783555067193374, \"R2\": -11.739056703820452, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.046793 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.383929359740238, \"RMSE\": 14.970988963724652, \"R2\": -8.585892537614809, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.056087 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.96579366260941, \"RMSE\": 14.36848414897767, \"R2\": -5.166041463970149, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.066275 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.69456937849415, \"RMSE\": 13.920059192886765, \"R2\": -3.4570517192093604, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.077161 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.550940690791585, \"RMSE\": 13.540299798742517, \"R2\": -2.4466881997122822, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.088733 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.359343163302276, \"RMSE\": 13.17888693683795, \"R2\": -1.6446630191344274, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.100994 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.050096583806178, \"RMSE\": 12.809003240652473, \"R2\": -1.1232058766616235, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.113939 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.0176805612097, \"RMSE\": 12.690905771048246, \"R2\": -0.6754526017728211, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.127575 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.128420828629457, 
\"RMSE\": 12.67565049026222, \"R2\": -0.3476766887041909, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.141896 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.200055067293626, \"RMSE\": 12.61943948921252, \"R2\": -0.0936676208508318, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.156913 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.131920516304556, \"RMSE\": 12.48608852319409, \"R2\": 0.14832986501041, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.172621 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.262178084838377, \"RMSE\": 12.632807163510387, \"R2\": 0.2813122010719644, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.18901 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.624585089266471, \"RMSE\": 13.14522964439942, \"R2\": 0.3187088278286061, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.206079 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.871229105762628, \"RMSE\": 13.33182219595452, \"R2\": 0.4005868940419749, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.223824 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.053594156641497, \"RMSE\": 13.615837484576032, \"R2\": 0.496913232046656, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.242255 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.103586276318884, \"RMSE\": 13.654347763291469, \"R2\": 0.5605873283913356, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.261365 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.539757430756955, \"RMSE\": 14.361033144769577, \"R2\": 0.5558923432468688, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.281177 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.000663746720075, \"RMSE\": 15.253690514733572, \"R2\": 0.5591184315358017, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.301675 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.301045102393736, \"RMSE\": 15.716738058687294, \"R2\": 0.5946085911267809, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.32286 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.275670942063872, \"RMSE\": 15.722526759958118, \"R2\": 0.648148884657232, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.344736 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.737413962135747, \"RMSE\": 16.425717512690383, \"R2\": 0.6579773485389515, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.367308 }, { \"step\": 319, \"track\": 
\"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.62956768598283, \"RMSE\": 18.5239816734196, \"R2\": 0.6052658886851463, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.390571 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.20835169207171, \"RMSE\": 19.400144953397177, \"R2\": 0.6294827674482892, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.41452 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.34580485099439, \"RMSE\": 19.47322215763284, \"R2\": 0.6657152345574865, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.439156 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.572829398695534, \"RMSE\": 19.644456145190084, \"R2\": 0.6907528542453616, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.464482 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.286348966120116, \"RMSE\": 20.694687599962585, \"R2\": 0.666738740088638, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.490494 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.777113556436731, \"RMSE\": 21.8206517710938, \"R2\": 0.6593962849465681, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.517211 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.215863000720542, \"RMSE\": 22.583610768099227, \"R2\": 0.6772102871974224, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.544615 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.33144965807796, \"RMSE\": 22.564695888148663, \"R2\": 0.6995373757169706, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.5727059999999999 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.156138146741625, \"RMSE\": 23.924755047114477, \"R2\": 0.6860306363812331, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.601482 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.826141311926307, \"RMSE\": 25.281544830782227, \"R2\": 0.6708975363085852, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6309469999999999 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.40263926327312, \"RMSE\": 26.38004441662919, \"R2\": 0.6815842184892376, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6611069999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.533400796677356, \"RMSE\": 26.42712307382207, \"R2\": 0.7030645738539452, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6919729999999998 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 
18.01931805998843, \"RMSE\": 26.98764790902567, \"R2\": 0.7034989551644695, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7235269999999998 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.02005973582853, \"RMSE\": 28.983219342716676, \"R2\": 0.6787962347144068, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7557739999999998 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.888963224686226, \"RMSE\": 30.578078926209333, \"R2\": 0.6843036130043219, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7887069999999998 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.14556805064173, \"RMSE\": 30.710181129007665, \"R2\": 0.6990197135707891, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8223269999999998 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.606054171923702, \"RMSE\": 31.270986299633183, \"R2\": 0.7064351021760091, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8566339999999998 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.410220326067677, \"RMSE\": 32.615082621422005, \"R2\": 0.6899384474766328, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8916409999999998 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.149063292155795, \"RMSE\": 33.66176418126127, \"R2\": 0.6883188968774838, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.9273449999999998 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.923596011881333, \"RMSE\": 34.92960124509041, \"R2\": 0.6982661596564212, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.9637369999999996 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.042465823580866, \"RMSE\": 34.93124976178739, \"R2\": 0.7106985365247873, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.0008159999999997 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.974366627279803, \"RMSE\": 36.47485289150521, \"R2\": 0.6942867450600009, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.0385829999999998 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 24.688352372874245, \"RMSE\": 37.45551228620605, \"R2\": 0.6917248794696187, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.077041 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 25.52040552278537, \"RMSE\": 38.65530944983144, \"R2\": 0.6962839796503111, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.116183 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.51586240468561, \"RMSE\": 26.237375459551668, \"R2\": 
-1613.712423965852, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.004083 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.404588626352648, \"RMSE\": 19.15618772462805, \"R2\": -146.53108046561562, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.01063 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.231791081689297, \"RMSE\": 16.474815193156783, \"R2\": -146.113106004694, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.018597 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.897294138330498, \"RMSE\": 14.380930849374858, \"R2\": -111.44182781593445, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.027966 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.086918618304638, \"RMSE\": 12.871841233853624, \"R2\": -55.52038254270653, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.038819 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.090525087262731, \"RMSE\": 11.800543733353924, \"R2\": -39.29933105483862, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.051073 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.252895179240796, \"RMSE\": 10.939748534807466, \"R2\": -34.79049149741283, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.064731 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.613172563674658, \"RMSE\": 10.244365728872303, \"R2\": -26.67772385659461, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.079792 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.139122523864994, \"RMSE\": 9.674573881172426, \"R2\": -20.679349421494624, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.096247 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.970287512907828, \"RMSE\": 9.286148634805688, \"R2\": -19.03287536608377, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.114161 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.714509288646119, \"RMSE\": 8.88589717245386, \"R2\": -18.78694495096694, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.133481 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.3785831664508095, \"RMSE\": 8.51169894931726, \"R2\": -17.292125083299812, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.154212 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.192432902948977, \"RMSE\": 8.203158141559566, \"R2\": -16.21895925323482, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.176332 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression with 
l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.032216676138626, \"RMSE\": 7.92689648751503, \"R2\": -15.834209174335763, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.199865 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.888249411356283, \"RMSE\": 7.680244711632193, \"R2\": -14.54126486396457, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.224857 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.851910342336546, \"RMSE\": 7.48583024126048, \"R2\": -14.495465984230798, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.251273 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.7628843932429423, \"RMSE\": 7.2990742240635536, \"R2\": -14.5680671641431, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.279095 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.741498329747232, \"RMSE\": 7.147194018854174, \"R2\": -14.04949995366026, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.308317 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.6331417160345065, \"RMSE\": 6.972318267910069, \"R2\": -13.817498979914417, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.3389480000000001 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.505723677240456, \"RMSE\": 6.801269751447825, \"R2\": -13.497878046830476, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.371039 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.377789937544477, \"RMSE\": 6.6406714986639335, \"R2\": -13.278693194916952, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4045350000000001 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.285565585155429, \"RMSE\": 6.496417025168413, \"R2\": -12.417819031510929, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4394350000000001 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.204438206990859, \"RMSE\": 6.363151879091182, \"R2\": -11.207416254643826, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4757300000000001 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1147239220792944, \"RMSE\": 6.234124280033156, \"R2\": -10.44779843680249, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.5134280000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.0674610457420317, \"RMSE\": 6.126558214637352, \"R2\": -9.824203383261038, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.5525380000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.990407253638538, 
\"RMSE\": 6.01302433311803, \"R2\": -9.444947809169491, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.593122 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9353658306947947, \"RMSE\": 5.909270916056388, \"R2\": -9.05064000964308, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.635128 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.8633890512526734, \"RMSE\": 5.806679023039649, \"R2\": -8.935926541145857, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.6785490000000001 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.807495338487497, \"RMSE\": 5.711109374041071, \"R2\": -8.849273711490637, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.723357 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.7371272959787114, \"RMSE\": 5.616984296672238, \"R2\": -8.421904346026553, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.769575 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695279458158905, \"RMSE\": 5.533794104458184, \"R2\": -8.005492094038127, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.817237 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.663427445960627, \"RMSE\": 5.457208108806078, \"R2\": -7.594247452705627, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.866263 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.616152871221604, \"RMSE\": 5.378587544649896, \"R2\": -7.28837246231315, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.916653 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5798661012588893, \"RMSE\": 5.305972052989116, \"R2\": -7.193548818831784, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.968481 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.537320459903927, \"RMSE\": 5.236098928386573, \"R2\": -7.188742583674767, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.021719 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.487562037475977, \"RMSE\": 5.165214649048708, \"R2\": -7.14359946989749, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.076348 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4359344014471898, \"RMSE\": 5.096521995605296, \"R2\": -6.923665614900413, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.1323770000000002 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.407655610747697, \"RMSE\": 5.035258504842907, \"R2\": -6.818106944929671, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.1897670000000002 }, { 
\"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.359216458068109, \"RMSE\": 4.971257259303496, \"R2\": -6.65476288050581, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.2485520000000003 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.316070029510264, \"RMSE\": 4.9101929612142525, \"R2\": -6.535402552442101, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.3087800000000005 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2777366252623445, \"RMSE\": 4.852446886619337, \"R2\": -6.440024000118236, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.3703650000000005 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.243368105900121, \"RMSE\": 4.7970088928814505, \"R2\": -6.33849981001193, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.4333480000000005 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.208161873346368, \"RMSE\": 4.742699334581194, \"R2\": -6.1609167229990645, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.4977110000000002 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.173524312605847, \"RMSE\": 4.690026300839657, \"R2\": -5.967944096931786, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.5634250000000005 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.143113317062195, \"RMSE\": 4.639957881226245, \"R2\": -5.848706397355668, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.6305320000000003 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.107662636673197, \"RMSE\": 4.590154711256589, \"R2\": -5.812600067991807, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.6990770000000004 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.084055644614135, \"RMSE\": 4.5438188766398575, \"R2\": -5.732386133187966, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.7689810000000004 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0573225686306618, \"RMSE\": 4.498049663013517, \"R2\": -5.666421016566231, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.8402960000000004 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.029372876222096, \"RMSE\": 4.453478872426294, \"R2\": -5.661880798559547, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.9129870000000004 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9991816433402003, \"RMSE\": 4.409973662813209, \"R2\": -5.642320489885111, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.9870360000000005 }, { \"step\": 11, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 29.34636433128918, \"RMSE\": 30.877867366178624, \"R2\": -1226.303892160441, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.002755 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 16.78579478575624, \"RMSE\": 22.219906445728544, \"R2\": -521.8939460594183, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.007689 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 12.764406226748012, \"RMSE\": 18.43476392899385, \"R2\": -235.02444355689545, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0139269999999999 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.51475037374614, \"RMSE\": 16.140786164803156, \"R2\": -204.11441945614396, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.021043 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.118868381910096, \"RMSE\": 17.807152193193623, \"R2\": -60.900579144648304, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0290209999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.068652098820438, \"RMSE\": 16.444921319285292, \"R2\": -21.75701273895947, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.037852 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.238561614797426, \"RMSE\": 15.414518181428049, \"R2\": -12.40107017683018, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0475289999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.397438800335843, \"RMSE\": 14.475112340817043, \"R2\": -9.714489831884093, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0580659999999999 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.833048644024143, \"RMSE\": 13.715858800506394, \"R2\": -7.045954767890709, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.069447 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.655503029296072, \"RMSE\": 13.224205829308971, \"R2\": -4.223044606682061, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0819039999999999 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.616633860178707, \"RMSE\": 12.86945366769748, \"R2\": -2.809655727072758, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0952199999999999 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.226514673025618, \"RMSE\": 12.359535435581538, \"R2\": -1.871770505407636, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1094009999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.028644030968596, \"RMSE\": 
11.955222747545204, \"R2\": -1.1763474084194208, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1244559999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 6.986848640780892, \"RMSE\": 11.69340351574852, \"R2\": -0.7694704308339801, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1403709999999999 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.055944609610511, \"RMSE\": 11.647997480703344, \"R2\": -0.411397823157807, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1571089999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.165300854035116, \"RMSE\": 11.69334605191042, \"R2\": -0.146892755517725, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1746439999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.086580322778478, \"RMSE\": 11.479257625599036, \"R2\": 0.0950328202686906, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1929879999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.0079421524022685, \"RMSE\": 11.279628541389028, \"R2\": 0.3049625679809165, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.2121359999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.136281210496879, \"RMSE\": 11.437970564343969, \"R2\": 0.4108328996032877, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.2320939999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.591733813835971, \"RMSE\": 12.23821647621677, \"R2\": 0.409482641430799, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.252837 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.63642424272378, \"RMSE\": 12.197368986664095, \"R2\": 0.4982590675647103, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.274371 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.775639220351206, \"RMSE\": 12.334191292520584, \"R2\": 0.5871659255406134, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.296703 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.787415018822619, \"RMSE\": 12.26821713761009, \"R2\": 0.6452735558950271, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.31983 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.387229506824328, \"RMSE\": 13.12794439290609, \"R2\": 0.6288834273883384, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.343762 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.893165599265544, \"RMSE\": 14.22060275652947, \"R2\": 0.6168153604937621, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.368493 }, { \"step\": 286, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.145334455404228, \"RMSE\": 14.488680433063887, \"R2\": 0.6554856011642076, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.394025 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.138984650870034, \"RMSE\": 14.40937364029996, \"R2\": 0.7044680409221951, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.420371 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.653721426872565, \"RMSE\": 15.186897141025446, \"R2\": 0.7076222812215553, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.447515 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.688547371510406, \"RMSE\": 17.568442046558065, \"R2\": 0.6449394075517852, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.475457 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.136888804773204, \"RMSE\": 18.130051576409294, \"R2\": 0.6764089203981734, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5041950000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.389709627918275, \"RMSE\": 18.31814497212097, \"R2\": 0.7041960757214223, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.53373 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.506556624125665, \"RMSE\": 18.357319157972537, \"R2\": 0.7299499902857889, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.56408 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 12.507799925411486, \"RMSE\": 19.94157204039453, \"R2\": 0.6905532928077196, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5952430000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.00643913732256, \"RMSE\": 20.915910426515573, \"R2\": 0.6870553798396234, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6272240000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.44126440090726, \"RMSE\": 21.59107831138786, \"R2\": 0.7049595304644938, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6600050000000001 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.452600873244052, \"RMSE\": 21.4799043653453, \"R2\": 0.7277322681469997, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6935880000000001 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 14.30954893105116, \"RMSE\": 22.795153034451378, \"R2\": 0.7149787127692513, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7279810000000001 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.058465273046648, \"RMSE\": 
24.12824896117789, \"R2\": 0.7002387244037227, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7631730000000001 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.61316127520364, \"RMSE\": 25.00709438423813, \"R2\": 0.7138656442877266, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.799166 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.833192306896644, \"RMSE\": 25.159785721055627, \"R2\": 0.7308613212218023, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.8359580000000001 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 16.36714766376461, \"RMSE\": 25.770608582556893, \"R2\": 0.729638089688956, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.87355 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 17.39003054773241, \"RMSE\": 27.77320733878432, \"R2\": 0.7050560764454472, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.911945 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 18.288428268963266, \"RMSE\": 29.396681708172505, \"R2\": 0.7082265052542642, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.951137 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 18.65703381705754, \"RMSE\": 29.64739580601693, \"R2\": 0.7194912595024185, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.991127 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 19.170202167844984, \"RMSE\": 30.22319045197901, \"R2\": 0.7257784495510498, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.031919 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 20.00164363454025, \"RMSE\": 31.52072905752619, \"R2\": 0.7103967314785831, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.073505 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 20.7279777099298, \"RMSE\": 32.51187613530653, \"R2\": 0.7092492864364832, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.115928 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 21.56683033620196, \"RMSE\": 33.84128388534863, \"R2\": 0.7167757554755416, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.159144 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 21.76383497583081, \"RMSE\": 33.92033428284125, \"R2\": 0.727201090633804, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.203152 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 22.69913149057359, \"RMSE\": 35.42417858076478, \"R2\": 0.7116454901231712, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.247925 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 23.377380034069706, \"RMSE\": 36.32705612571005, \"R2\": 0.7100204288247138, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.293462 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 24.23392939046311, \"RMSE\": 37.557568322570944, \"R2\": 0.7132890208408607, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.339772 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.88769754963664, \"RMSE\": 24.32970381980572, \"R2\": -1387.4429851591376, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.004188 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.670966252574736, \"RMSE\": 18.65150015508889, \"R2\": -138.85979610511808, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.011118 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.92834996141842, \"RMSE\": 15.667746469337834, \"R2\": -132.052589810652, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.019635 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.678171261463673, \"RMSE\": 14.124417656525663, \"R2\": -107.46634425227307, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0297069999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.352313294103828, \"RMSE\": 13.38210191485773, \"R2\": -60.090321390572015, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.041437 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.620142702346234, \"RMSE\": 12.447697479286916, \"R2\": -43.84064485890953, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.05473 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.069393725677067, \"RMSE\": 11.747669450243144, \"R2\": -40.272086023690846, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.069574 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.915997483818508, \"RMSE\": 11.323556682786094, \"R2\": -32.81628847194888, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.085987 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.901057285338727, \"RMSE\": 11.048721851620664, \"R2\": -27.27526182903414, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.103947 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.54356570823564, \"RMSE\": 10.615481381947994, \"R2\": -25.17890116869019, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.1235479999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.241521693861677, \"RMSE\": 10.231227443733497, \"R2\": -25.232015334984823, \"Memory in Mb\": 0.0043020248413085, 
\"Time in s\": 0.1447119999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.008192421558015, \"RMSE\": 9.935910620042463, \"R2\": -23.925679436807183, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.1674199999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.801990869552961, \"RMSE\": 9.666795781283112, \"R2\": -22.91166502123009, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.191681 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.624313646675292, \"RMSE\": 9.422664085622769, \"R2\": -22.786676328633025, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.217492 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.459069309379183, \"RMSE\": 9.212027904845302, \"R2\": -21.358709628275356, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.244937 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.271769455500056, \"RMSE\": 8.978276576497144, \"R2\": -21.290029949269996, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.2739479999999999 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.106653016686588, \"RMSE\": 8.760106381933458, \"R2\": -21.42424847489592, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.304509 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.987363514041657, \"RMSE\": 8.573066448421043, \"R2\": -20.653260781415117, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.336642 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.8616356212919145, \"RMSE\": 8.393806584529749, \"R2\": -20.475259224624704, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.370328 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.709779552257037, \"RMSE\": 8.216191215364908, \"R2\": -20.15755692495403, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.4056389999999999 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.611842526965273, \"RMSE\": 8.072589482864162, \"R2\": -20.100376445858966, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.442509 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.621266668644241, \"RMSE\": 8.061169250733263, \"R2\": -19.659995268253144, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.480923 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.589374981160082, \"RMSE\": 7.977922595195435, \"R2\": -18.1892858176961, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.520896 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", 
\"MAE\": 5.601949193869223, \"RMSE\": 7.939757215258646, \"R2\": -17.568871361729258, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.5624170000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.5323381947357735, \"RMSE\": 7.827118471157092, \"R2\": -16.667155417363553, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6055100000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.482642323931563, \"RMSE\": 7.735647602994712, \"R2\": -16.286764124140948, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6502260000000001 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.40625682094428, \"RMSE\": 7.63238246035087, \"R2\": -15.7666448554263, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.696447 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.339029679572209, \"RMSE\": 7.532223621796607, \"R2\": -15.718570546376949, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.744222 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.278136676956568, \"RMSE\": 7.444054065699633, \"R2\": -15.733326658615429, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7935180000000001 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.267448053107655, \"RMSE\": 7.406286860836784, \"R2\": -15.380732931295814, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.8443630000000001 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.236749685364014, \"RMSE\": 7.349650271122191, \"R2\": -14.88527744664384, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.8968030000000001 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.212060707085285, \"RMSE\": 7.303465599867649, \"R2\": -14.393054201831111, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.95074 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.169763730821092, \"RMSE\": 7.239859498572405, \"R2\": -14.017341318649356, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.0062160000000002 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.126488999027155, \"RMSE\": 7.167741867944588, \"R2\": -13.952260083042017, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.063227 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.093088048073085, \"RMSE\": 7.110044108134988, \"R2\": -14.098928285386927, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.121789 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.090740367293551, \"RMSE\": 7.074302078930798, \"R2\": -14.275901824522656, \"Memory in Mb\": 0.0044355392456054, 
\"Time in s\": 1.181923 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.094807885255693, \"RMSE\": 7.045283722404726, \"R2\": -14.141723406350016, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.243554 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.043787658869922, \"RMSE\": 6.975646542541488, \"R2\": -14.00468895273832, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.306712 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.064043270517397, \"RMSE\": 6.970645877797938, \"R2\": -14.050305184338798, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.3714110000000002 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.100755566375936, \"RMSE\": 7.010389089224324, \"R2\": -14.360083784619212, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.4376520000000002 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.088064827359337, \"RMSE\": 6.979627415872227, \"R2\": -14.392786134873418, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.505445 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.068856460674791, \"RMSE\": 6.93736545783203, \"R2\": -14.348127111309871, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.574755 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.054699520351681, \"RMSE\": 6.900248160625591, \"R2\": -14.158172906563095, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.645553 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.051365258335469, \"RMSE\": 6.881479694679562, \"R2\": -14.000915574481752, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.717914 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.025346349320704, \"RMSE\": 6.842894221251935, \"R2\": -13.895672840614656, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.791764 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.9962011230931616, \"RMSE\": 6.800765764747934, \"R2\": -13.95456831471299, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.867216 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.964612353038952, \"RMSE\": 6.764345134580912, \"R2\": -13.920332812605436, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.944198 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.9296416909632255, \"RMSE\": 6.717192667284049, \"R2\": -13.866880687662514, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.02269 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 
4.910978624057615, \"RMSE\": 6.682715130965835, \"R2\": -14.000438748691009, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.102738 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.906394390750375, \"RMSE\": 6.665596501187553, \"R2\": -14.174901311798424, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.184247 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 40.361343182089286, \"RMSE\": 50.93510711941157, \"R2\": -3338.580868182736, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.003338 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 33.77268754890631, \"RMSE\": 41.67984599422324, \"R2\": -1838.845575618055, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.00919 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 32.39258875507137, \"RMSE\": 38.96806999674433, \"R2\": -1053.6287703611629, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.01639 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 30.94881031854868, \"RMSE\": 36.76152485506615, \"R2\": -1062.9809670274265, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.024921 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 33.955857035779495, \"RMSE\": 41.369655851763525, \"R2\": -333.094700988151, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.034827 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.010493836145145, \"RMSE\": 40.92418807176112, \"R2\": -139.93270784533922, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.046059 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.064338631511326, \"RMSE\": 40.5595538563462, \"R2\": -91.78246602216656, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.058629 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.363478110253816, \"RMSE\": 40.47408671194747, \"R2\": -82.76869054449229, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.072516 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.108671766629826, \"RMSE\": 39.953033914579606, \"R2\": -67.2701887367714, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.087732 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 35.5272246808861, \"RMSE\": 41.29414928968925, \"R2\": -49.9285816695273, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.104612 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 35.85052464333277, \"RMSE\": 41.48724749727828, \"R2\": -38.59084342971371, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.122819 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 36.414238795248615, \"RMSE\": 42.10793271457587, \"R2\": -32.332913655998325, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.142364 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.27092918309872, \"RMSE\": 43.0841670883325, \"R2\": -27.26495387953688, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.163229 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.68834060456834, \"RMSE\": 43.351809236536255, \"R2\": -23.3206899065102, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.18538 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.63751931524077, \"RMSE\": 43.32469674855668, \"R2\": -18.52621065458175, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.208847 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 38.77878602757167, \"RMSE\": 44.74953718825682, \"R2\": -15.796635622291838, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.233634 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 39.47954967522975, \"RMSE\": 45.39032172466195, \"R2\": -13.149195443978996, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.259707 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 40.91548075261064, \"RMSE\": 46.96428169788168, \"R2\": -11.049082215962626, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.287109 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 41.53723737741547, \"RMSE\": 47.716579905431935, \"R2\": -9.253665706385002, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.31584 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 41.99935380922626, \"RMSE\": 48.63121942098776, \"R2\": -8.324525171754294, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.345843 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 42.76794868726854, \"RMSE\": 49.65880643089243, \"R2\": -7.316484115890946, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.377141 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 44.368299619960766, \"RMSE\": 51.88245138837915, \"R2\": -6.304578357179455, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.409767 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 45.426835880148886, \"RMSE\": 53.192808855117775, \"R2\": -5.668628208432534, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.443674 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 46.93737170570451, \"RMSE\": 55.9021194350506, \"R2\": -5.729354993233594, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.478844 }, { \"step\": 
275, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 48.507353400069945, \"RMSE\": 58.8434937271261, \"R2\": -5.560984055006233, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.515365 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 49.83788736782816, \"RMSE\": 60.74084767697289, \"R2\": -5.054961799673954, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5532039999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 51.62361105051165, \"RMSE\": 63.27455882125991, \"R2\": -4.698656745997159, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5923289999999999 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 53.1847087657747, \"RMSE\": 65.32139627595005, \"R2\": -4.409001340116382, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6328059999999999 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 55.18568520771326, \"RMSE\": 70.36874449488667, \"R2\": -4.696335728503607, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6746009999999999 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 56.89081675494835, \"RMSE\": 72.51519786174504, \"R2\": -4.176742159590063, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7176629999999999 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 58.741890997125225, \"RMSE\": 75.13624143509449, \"R2\": -3.976681879584251, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7620199999999999 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 60.2425190521958, \"RMSE\": 76.8261590755005, \"R2\": -3.729812475420129, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.8076819999999999 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 62.22918871806441, \"RMSE\": 80.45649530418282, \"R2\": -4.037201263859445, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.854609 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 64.15805403621584, \"RMSE\": 84.29062360683722, \"R2\": -4.082442538013045, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.902845 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 65.35464556519459, \"RMSE\": 85.58979700811152, \"R2\": -3.6363575203718743, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.952397 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 67.08652533638559, \"RMSE\": 87.70251677464411, \"R2\": -3.538952097004609, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.00322 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 68.34565949899115, 
\"RMSE\": 89.3540667816312, \"R2\": -3.3794635851122994, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.055319 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 70.95506872236689, \"RMSE\": 94.70758550832085, \"R2\": -3.618420231648276, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.108722 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 72.57879663609631, \"RMSE\": 96.822819417077, \"R2\": -3.2894241164712765, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.163438 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 74.40667828471298, \"RMSE\": 99.12463362784464, \"R2\": -3.1775863007897582, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.219419 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 75.93202948942836, \"RMSE\": 101.47969042740628, \"R2\": -3.192319994595937, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.276691 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 78.59712471455953, \"RMSE\": 106.68213481552291, \"R2\": -3.3518185198786687, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.335258 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 81.70573920737151, \"RMSE\": 112.24508574603004, \"R2\": -3.253866822086197, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.395089 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 82.72251416230137, \"RMSE\": 113.16810597159808, \"R2\": -3.0871576545332315, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.456215 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 84.44211178292737, \"RMSE\": 115.99711612480068, \"R2\": -3.039385949207989, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.518639 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 86.55149997089892, \"RMSE\": 119.94151559804617, \"R2\": -3.193242857597108, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.582321 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 87.81070722823603, \"RMSE\": 121.26627191062052, \"R2\": -3.0449837234699952, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.64731 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 90.44199567451936, \"RMSE\": 126.238673662019, \"R2\": -2.9411377147279807, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.713605 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 91.59067370330054, \"RMSE\": 127.24192286613216, \"R2\": -2.8386881284150203, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.781138 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 93.78609063040562, \"RMSE\": 130.999704877259, \"R2\": -2.943372518937519, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.84997 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 96.49865979675556, \"RMSE\": 135.93192637304293, \"R2\": -3.060223463970532, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.920139 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 99.56805853722273, \"RMSE\": 141.40025114882988, \"R2\": -3.0639630782357843, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.991547 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 38.256966119949794, \"RMSE\": 53.46437671117289, \"R2\": -6703.762875072117, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.004258 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 36.86518407094958, \"RMSE\": 46.91757933405302, \"R2\": -883.9863015306486, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0112559999999999 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 34.81538726709292, \"RMSE\": 43.77024226536395, \"R2\": -1037.408328049847, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.019887 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.82099099523828, \"RMSE\": 40.95636211937148, \"R2\": -911.003802678922, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0300789999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.41507697560151, \"RMSE\": 39.67328525303196, \"R2\": -535.9329715822871, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.041942 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.126533810299065, \"RMSE\": 38.74392424963554, \"R2\": -433.4111998192701, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.055368 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.578696449953306, \"RMSE\": 37.792227545537656, \"R2\": -426.12792465890055, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.070349 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.74326128631808, \"RMSE\": 36.75147996394125, \"R2\": -355.2131008927076, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.086898 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.01457804514465, \"RMSE\": 35.792387341570375, \"R2\": -295.7316609698654, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.104998 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.69358993814741, \"RMSE\": 35.287452593511624, \"R2\": -288.27615949420783, \"Memory in Mb\": 0.0043020248413085, \"Time in 
s\": 0.124741 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.653103799764548, \"RMSE\": 34.981694493871686, \"R2\": -305.6605175079266, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.146064 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.438535707776744, \"RMSE\": 34.575414655319626, \"R2\": -300.8328104705327, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.168934 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.47891951475184, \"RMSE\": 34.56744094622709, \"R2\": -304.7589578451177, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.193372 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.26309099262944, \"RMSE\": 34.21824031120625, \"R2\": -312.6907330516863, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.2193709999999999 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.22587791049338, \"RMSE\": 34.04470159277256, \"R2\": -304.37628668920115, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.246997 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.39018713755992, \"RMSE\": 34.027497016659055, \"R2\": -319.1729973057711, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.276184 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.42027037377748, \"RMSE\": 33.93053373578466, \"R2\": -335.4190027425263, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.306921 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.664307062669348, \"RMSE\": 34.045635299267715, \"R2\": -340.48671423689984, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.339233 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.83498723746747, \"RMSE\": 34.11808971579651, \"R2\": -353.80514851974, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.373101 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.001082895685222, \"RMSE\": 34.17628998525774, \"R2\": -365.0785427385626, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.408596 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.14554243966483, \"RMSE\": 34.20268208319664, \"R2\": -377.7780423674053, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.445652 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.318997649838, \"RMSE\": 34.29163258825692, \"R2\": -372.8612574130533, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.484259 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.44816166194883, \"RMSE\": 
34.34474274542465, \"R2\": -354.6310813895727, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.524426 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.547707035634, \"RMSE\": 34.3460988289287, \"R2\": -346.476863625336, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.5661419999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.677713278160866, \"RMSE\": 34.40102042286509, \"R2\": -340.27577766039445, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6094289999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.774567489967996, \"RMSE\": 34.41616981796676, \"R2\": -341.1727526506248, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6543389999999999 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.8774276068303, \"RMSE\": 34.45885995564912, \"R2\": -340.7651110108783, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7007559999999999 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.809160821418956, \"RMSE\": 34.35467967149731, \"R2\": -346.7959646189386, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7487349999999999 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.794626934327955, \"RMSE\": 34.31319945532044, \"R2\": -354.5377185045104, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.798249 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.95165244409411, \"RMSE\": 34.41180694760753, \"R2\": -352.62847406429205, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.849322 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.05924245979316, \"RMSE\": 34.47559777081781, \"R2\": -348.53049085374005, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.901986 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.17486346702509, \"RMSE\": 34.55495775749415, \"R2\": -343.578007451748, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.956154 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.18138785276698, \"RMSE\": 34.53217565702543, \"R2\": -340.6493968041527, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.011873 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.262098564397714, \"RMSE\": 34.57860649898553, \"R2\": -346.98225985471817, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.06915 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.21351117376332, \"RMSE\": 34.56642775102796, \"R2\": -355.8704038677769, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.127995 }, { \"step\": 720, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.187412408778744, \"RMSE\": 34.52480046664081, \"R2\": -362.8329367085535, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.188423 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.11951946368673, \"RMSE\": 34.44075758421072, \"R2\": -360.84595808308967, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.250358 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.06286774404838, \"RMSE\": 34.392033812715184, \"R2\": -363.7319274626152, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.313829 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.122129081397333, \"RMSE\": 34.4147077867913, \"R2\": -365.8490836115921, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.378848 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.106214253085103, \"RMSE\": 34.39160753515401, \"R2\": -368.6700713862459, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.445416 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.221689813214, \"RMSE\": 34.48240321448139, \"R2\": -374.7057202811433, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.513551 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.279425902447173, \"RMSE\": 34.50697415799086, \"R2\": -378.7344488771949, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.583215 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.342876956175303, \"RMSE\": 34.54160049479585, \"R2\": -378.8414458535583, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.65438 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.26946109805924, \"RMSE\": 34.463329541799936, \"R2\": -375.2431199026635, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.727121 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.252963502360984, \"RMSE\": 34.437602544585566, \"R2\": -376.2648040183398, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.801358 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.17140034618248, \"RMSE\": 34.35879469425013, \"R2\": -380.710483609337, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.877212 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.028682196255257, \"RMSE\": 34.23221645186374, \"R2\": -381.1175926718204, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.954607 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.091829199349828, \"RMSE\": 34.349474902309865, \"R2\": -387.76257537598985, \"Memory in 
Mb\": 0.0044355392456054, \"Time in s\": 2.033516 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.00094350988743, \"RMSE\": 34.303507421677764, \"R2\": -394.25294990859175, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.113994 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.128814077399102, \"RMSE\": 34.425663215951964, \"R2\": -403.7738674316034, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.195941 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.6439393939393945, \"RMSE\": 12.708027567111456, \"R2\": -206.8805289598106, \"Memory in Mb\": 0.007791519165039, \"Time in s\": 0.002301 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7674242424242426, \"RMSE\": 9.021574170013263, \"R2\": -85.19732920009746, \"Memory in Mb\": 0.0116405487060546, \"Time in s\": 0.007035 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.366161616161617, \"RMSE\": 7.437810062008745, \"R2\": -37.42129411139464, \"Memory in Mb\": 0.0161724090576171, \"Time in s\": 0.013843 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.015530303030304, \"RMSE\": 6.463663489621867, \"R2\": -31.893061768560024, \"Memory in Mb\": 0.0202007293701171, \"Time in s\": 0.022662 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.2124242424242424, \"RMSE\": 6.080421054665558, \"R2\": -6.217272109648366, \"Memory in Mb\": 0.0243968963623046, \"Time in s\": 0.033503 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.280050505050505, \"RMSE\": 5.694858940322259, \"R2\": -1.7290883479828647, \"Memory in Mb\": 0.0287837982177734, \"Time in s\": 0.046733 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.61926406926407, \"RMSE\": 5.707950266794224, \"R2\": -0.8375532519268223, \"Memory in Mb\": 0.0331974029541015, \"Time in s\": 0.062616 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.530492424242425, \"RMSE\": 5.412982721609634, \"R2\": -0.4983072905775765, \"Memory in Mb\": 0.0375041961669921, \"Time in s\": 0.081466 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.4755892255892267, \"RMSE\": 5.17010990945742, \"R2\": -0.1432234574096695, \"Memory in Mb\": 0.0422878265380859, \"Time in s\": 0.103593 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7716666666666683, \"RMSE\": 5.296236752390676, \"R2\": 0.1622405877971293, \"Memory in Mb\": 0.0433177947998046, \"Time in s\": 0.129424 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.180853994490359, \"RMSE\": 5.621206607854847, \"R2\": 0.2731837882445769, \"Memory in Mb\": 0.0438785552978515, \"Time in s\": 0.158775 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.3642676767676765, \"RMSE\": 5.706770043255583, \"R2\": 0.3877536814355664, \"Memory in Mb\": 0.0436267852783203, \"Time in s\": 0.191664 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.646736596736596, \"RMSE\": 5.919243012407738, \"R2\": 0.4664867393310171, \"Memory in Mb\": 0.0439319610595703, \"Time in s\": 0.228099 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.7550865800865783, \"RMSE\": 5.97572666401829, \"R2\": 0.537892640768072, \"Memory in Mb\": 0.0440692901611328, \"Time in s\": 0.268063 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.093838383838381, \"RMSE\": 6.488494998076776, \"R2\": 0.562039588096868, \"Memory in Mb\": 0.0446529388427734, \"Time in s\": 0.311593 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.458428030303029, \"RMSE\": 6.947478945595657, \"R2\": 0.5951448357515823, \"Memory in Mb\": 0.0446796417236328, \"Time in s\": 0.358632 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.792959001782529, \"RMSE\": 7.272258331212408, \"R2\": 0.6368016898131145, \"Memory in Mb\": 0.0447597503662109, \"Time in s\": 0.40948 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 5.229713804713803, \"RMSE\": 7.766788141562423, \"R2\": 0.6704650236153215, \"Memory in Mb\": 0.0442829132080078, \"Time in s\": 0.46388 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 5.61188197767145, \"RMSE\": 8.429860803311705, \"R2\": 0.6799768871245477, \"Memory in Mb\": 0.0443363189697265, \"Time in s\": 0.521779 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 6.048560606060604, \"RMSE\": 9.536044923225656, \"R2\": 0.6414638231876792, \"Memory in Mb\": 0.0443096160888671, \"Time in s\": 0.583178 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 6.582178932178929, \"RMSE\": 10.20324912411692, \"R2\": 0.648905367768132, \"Memory in Mb\": 0.0448932647705078, \"Time in s\": 0.6480619999999999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.071418732782365, \"RMSE\": 10.928542055135823, \"R2\": 0.6759002976153703, \"Memory in Mb\": 0.0449466705322265, \"Time in s\": 0.7164429999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.477799736495386, \"RMSE\": 11.323352624926212, \"R2\": 0.6978095597045382, \"Memory in Mb\": 0.045053482055664, \"Time in s\": 0.7883119999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.970770202020199, \"RMSE\": 12.28335187867794, \"R2\": 0.6750992767833781, \"Memory in Mb\": 0.0446300506591796, \"Time in s\": 0.8636689999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 8.55812121212121, \"RMSE\": 13.382565810664548, \"R2\": 
0.6606476529151027, \"Memory in Mb\": 0.0446834564208984, \"Time in s\": 0.942555 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.054137529137526, \"RMSE\": 14.013384412631826, \"R2\": 0.6777181990167639, \"Memory in Mb\": 0.0448436737060546, \"Time in s\": 1.024953 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.468967452300786, \"RMSE\": 14.435360812541292, \"R2\": 0.7034011013652389, \"Memory in Mb\": 0.0454006195068359, \"Time in s\": 1.110877 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.90871212121212, \"RMSE\": 15.173853281638724, \"R2\": 0.7081243055691319, \"Memory in Mb\": 0.0454273223876953, \"Time in s\": 1.200187 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 10.713740856844304, \"RMSE\": 17.013635837866804, \"R2\": 0.6670107307192514, \"Memory in Mb\": 0.0455341339111328, \"Time in s\": 1.2928449999999998 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 11.460252525252525, \"RMSE\": 18.125243873896306, \"R2\": 0.6765805165314649, \"Memory in Mb\": 0.045083999633789, \"Time in s\": 1.388826 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 11.901710654936462, \"RMSE\": 18.5766916053512, \"R2\": 0.6957870549438744, \"Memory in Mb\": 0.0451908111572265, \"Time in s\": 1.488176 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 12.310464015151512, \"RMSE\": 18.922178666477887, \"R2\": 0.7130752857476492, \"Memory in Mb\": 0.0451641082763671, \"Time in s\": 1.591164 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 12.780394857667584, \"RMSE\": 19.823234941774256, \"R2\": 0.694215027528111, \"Memory in Mb\": 0.0456142425537109, \"Time in s\": 1.697605 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 13.344073083778964, \"RMSE\": 20.889730456192645, \"R2\": 0.6878383009059359, \"Memory in Mb\": 0.0456142425537109, \"Time in s\": 1.807438 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 13.830865800865798, \"RMSE\": 21.557316750546796, \"R2\": 0.7058815074667231, \"Memory in Mb\": 0.045694351196289, \"Time in s\": 1.920665 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 14.08051346801346, \"RMSE\": 21.615344438143325, \"R2\": 0.7242879119502419, \"Memory in Mb\": 0.0452175140380859, \"Time in s\": 2.037277 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 14.665069615069608, \"RMSE\": 22.79756192033108, \"R2\": 0.714918470135155, \"Memory in Mb\": 0.0452442169189453, \"Time in s\": 2.157231 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 15.362400318979262, \"RMSE\": 24.076729101709564, \"R2\": 0.7015174886253861, \"Memory in Mb\": 0.0457477569580078, \"Time in s\": 2.280525 }, { \"step\": 429, \"track\": \"Regression\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 15.914413364413358, \"RMSE\": 24.924104546372128, \"R2\": 0.7157616535295273, \"Memory in Mb\": 0.0458278656005859, \"Time in s\": 2.407192 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 16.21655303030303, \"RMSE\": 25.17906749713446, \"R2\": 0.730448642009748, \"Memory in Mb\": 0.0458545684814453, \"Time in s\": 2.537243 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 16.571359940872135, \"RMSE\": 25.529131814454708, \"R2\": 0.7346810631079229, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 2.670662 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 17.517063492063492, \"RMSE\": 27.45837911410348, \"R2\": 0.7117049574257082, \"Memory in Mb\": 0.0453777313232421, \"Time in s\": 2.807433 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 18.23357998590557, \"RMSE\": 28.586680380220997, \"R2\": 0.7240841374900429, \"Memory in Mb\": 0.0453510284423828, \"Time in s\": 2.947633 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 18.61876721763086, \"RMSE\": 29.038858036362505, \"R2\": 0.7308884346272555, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 3.0911699999999995 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 19.16138047138048, \"RMSE\": 29.754410032566323, \"R2\": 0.7342191699916846, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 3.2380789999999995 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 19.69344532279316, \"RMSE\": 30.658970587616192, \"R2\": 0.7260154404176653, \"Memory in Mb\": 0.0458545684814453, \"Time in s\": 3.388348 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 20.317762733720187, \"RMSE\": 31.53258587823862, \"R2\": 0.7265009007981393, \"Memory in Mb\": 0.0453777313232421, \"Time in s\": 3.541995 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.03841540404041, \"RMSE\": 32.63466371480821, \"R2\": 0.7366125677104822, \"Memory in Mb\": 0.0454311370849609, \"Time in s\": 3.699025 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.282900432900444, \"RMSE\": 32.8391739011002, \"R2\": 0.7443140702924032, \"Memory in Mb\": 0.0454044342041015, \"Time in s\": 3.859333 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.858333333333334, \"RMSE\": 33.61129662942374, \"R2\": 0.7404041745313898, \"Memory in Mb\": 0.0460147857666015, \"Time in s\": 4.022901 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 22.36307189542484, \"RMSE\": 34.18934989679706, \"R2\": 0.7431446126310046, \"Memory in Mb\": 0.0460681915283203, \"Time in s\": 4.189756 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 22.904341491841496, \"RMSE\": 34.79445522931405, 
\"R2\": 0.7539238777546076, \"Memory in Mb\": 0.046121597290039, \"Time in s\": 4.359905 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5579731333333355, \"RMSE\": 9.79490509533104, \"R2\": -224.0374880099697, \"Memory in Mb\": 0.0161190032958984, \"Time in s\": 0.00492 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.814472516666667, \"RMSE\": 6.975921914401759, \"R2\": -18.564491994995524, \"Memory in Mb\": 0.0286769866943359, \"Time in s\": 0.014556 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.386012811111112, \"RMSE\": 5.70893041508138, \"R2\": -16.665248891500116, \"Memory in Mb\": 0.0407314300537109, \"Time in s\": 0.029158 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1425351583333334, \"RMSE\": 4.950945892995169, \"R2\": -12.326934431680348, \"Memory in Mb\": 0.0527858734130859, \"Time in s\": 0.04994 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0609460066666667, \"RMSE\": 4.443635225860514, \"R2\": -5.735976224554387, \"Memory in Mb\": 0.065317153930664, \"Time in s\": 0.078368 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0173975888888886, \"RMSE\": 4.080152828774464, \"R2\": -3.8177766328983793, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.115087 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9499007047619044, \"RMSE\": 3.785073461941064, \"R2\": -3.284514427728187, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.160142 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8997031916666666, \"RMSE\": 3.548063392436267, \"R2\": -2.320037309218333, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.213621 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8894699925925924, \"RMSE\": 3.3651237174821174, \"R2\": -1.6229174672077478, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.275231 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8368248133333331, \"RMSE\": 3.1964619940401167, \"R2\": -1.3736195901717156, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.3451079999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7973693939393937, \"RMSE\": 3.051737973437425, \"R2\": -1.333837761501293, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.4231859999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7882918027777774, \"RMSE\": 2.9302063484469683, \"R2\": -1.1678441811700535, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.5093219999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.756480287179487, \"RMSE\": 2.818118800540444, \"R2\": -1.032185390259123, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 0.603278 }, { \"step\": 280, 
\"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7152656523809521, \"RMSE\": 2.716213897230066, \"R2\": -0.9765794643185606, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 0.705058 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6924420288888888, \"RMSE\": 2.626670145740793, \"R2\": -0.8178051192110904, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 0.8148869999999999 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6685162833333335, \"RMSE\": 2.544831183351663, \"R2\": -0.7907817018280727, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 0.932496 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6729953196078432, \"RMSE\": 2.478015515638401, \"R2\": -0.7943500832815562, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.057867 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6574798574074076, \"RMSE\": 2.410620514027796, \"R2\": -0.7120191674014065, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.1910109999999998 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6458375333333335, \"RMSE\": 2.3511270035956984, \"R2\": -0.6848943678054311, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.3319119999999998 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6474776666666668, \"RMSE\": 2.299895164719867, \"R2\": -0.6578320154352482, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.4805999999999997 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6263061492063494, \"RMSE\": 2.245732257498697, \"R2\": -0.6329783328857519, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.6370479999999996 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6161101106060607, \"RMSE\": 2.196509834675512, \"R2\": -0.5339119919932027, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.8010919999999997 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6114796710144929, \"RMSE\": 2.151899880839346, \"R2\": -0.3961217660875815, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.972734 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.595679659722222, \"RMSE\": 2.1075134371992843, \"R2\": -0.3083133320099125, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.151984 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5871453373333329, \"RMSE\": 2.067421382434369, \"R2\": -0.2325961934637197, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 2.338834 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5787837589743586, \"RMSE\": 2.029002666882104, \"R2\": -0.1892840306210153, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.533354 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 0.5648130308641971, \"RMSE\": 1.991660903442925, \"R2\": -0.1417123847500625, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.735472 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5728799083333329, \"RMSE\": 1.964965242398572, \"R2\": -0.1377909554693113, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 2.945174 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5729694735632179, \"RMSE\": 1.9355227317321977, \"R2\": -0.1312531571152491, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 3.162675 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5633090777777773, \"RMSE\": 1.904172510525728, \"R2\": -0.0827915396879483, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 3.387869 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5627685010752685, \"RMSE\": 1.877938378401011, \"R2\": -0.0371083516253996, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 3.620659 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5527265229166664, \"RMSE\": 1.8490211996046173, \"R2\": 0.0133784354988776, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 3.861011 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5494711494949491, \"RMSE\": 1.8235613734746328, \"R2\": 0.0472618750543785, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 4.108899 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5450294392156858, \"RMSE\": 1.7983422134533726, \"R2\": 0.05878940572589, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 4.364357 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5469558666666661, \"RMSE\": 1.7765896178823126, \"R2\": 0.0572950833274868, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 4.627459 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.543456514814814, \"RMSE\": 1.7539567687409483, \"R2\": 0.0609744119033208, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 4.898124 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5378193549549544, \"RMSE\": 1.7312282531924004, \"R2\": 0.085703628915721, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 5.1763650000000005 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5339037666666662, \"RMSE\": 1.709797998783685, \"R2\": 0.0985374850173486, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 5.462206 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5278528008547004, \"RMSE\": 1.688590710651063, \"R2\": 0.116822375782677, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 5.755616 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5283612208333329, \"RMSE\": 1.6702821330140922, \"R2\": 0.1280551636806639, 
\"Memory in Mb\": 0.065500259399414, \"Time in s\": 6.056664 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5218793715447149, \"RMSE\": 1.650734458460968, \"R2\": 0.1389919937119252, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 6.365303 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5164909984126979, \"RMSE\": 1.6320321449108954, \"R2\": 0.1505777025063954, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 6.681529 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5203083496124026, \"RMSE\": 1.617111920842916, \"R2\": 0.167474405637749, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 7.005502 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5169651212121207, \"RMSE\": 1.6004898524254525, \"R2\": 0.188553388101048, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 7.337010999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5126542296296291, \"RMSE\": 1.583708561642625, \"R2\": 0.2021320849395992, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 7.676102999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5104052702898545, \"RMSE\": 1.5679886127118026, \"R2\": 0.2050422348676139, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 8.022855999999999 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5051605113475173, \"RMSE\": 1.551953998854848, \"R2\": 0.2146112384044058, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 8.377156 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5025567965277773, \"RMSE\": 1.5374441378201589, \"R2\": 0.2211695274632112, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 8.739002999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.4982526563945573, \"RMSE\": 1.522650494486571, \"R2\": 0.2212491734260424, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 9.108430999999998 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.4939745494666663, \"RMSE\": 1.5080707141004983, \"R2\": 0.2232321406133778, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 9.485458999999995 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.042756132756132, \"RMSE\": 17.336048579080593, \"R2\": -385.8634917094176, \"Memory in Mb\": 0.0165748596191406, \"Time in s\": 0.003293 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.456785613727984, \"RMSE\": 12.282422261556867, \"R2\": -158.770726389092, \"Memory in Mb\": 0.0181541442871093, \"Time in s\": 0.009578 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4353973358733074, \"RMSE\": 10.07037651743448, \"R2\": -69.4325218162971, \"Memory in Mb\": 0.0234184265136718, \"Time in s\": 0.01815 }, { \"step\": 44, \"track\": \"Regression\", 
\"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.736909422894262, \"RMSE\": 8.732393473100391, \"R2\": -59.03623058514604, \"Memory in Mb\": 0.0244712829589843, \"Time in s\": 0.029162 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.788577579622257, \"RMSE\": 8.074088551816661, \"R2\": -11.726025456653014, \"Memory in Mb\": 0.0313148498535156, \"Time in s\": 0.042723 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.395880085598137, \"RMSE\": 7.878422021930021, \"R2\": -4.223121571879303, \"Memory in Mb\": 0.0407905578613281, \"Time in s\": 0.0593589999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.889526501621088, \"RMSE\": 7.800910386370324, \"R2\": -2.432180745921895, \"Memory in Mb\": 0.0471076965332031, \"Time in s\": 0.0796159999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.072650698433535, \"RMSE\": 7.572197783925699, \"R2\": -1.9320509270116557, \"Memory in Mb\": 0.0528984069824218, \"Time in s\": 0.103875 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.410984939713907, \"RMSE\": 7.55185413515251, \"R2\": -1.439151418709002, \"Memory in Mb\": 0.0539512634277343, \"Time in s\": 0.132276 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.441558524813062, \"RMSE\": 7.364764038532391, \"R2\": -0.6199522309877294, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.164898 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.487290951327676, \"RMSE\": 7.260940155844585, \"R2\": -0.2126939871368238, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.201538 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.401729970486312, \"RMSE\": 7.0591187066650845, \"R2\": 0.0632010249038049, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.242248 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.303599977233167, \"RMSE\": 6.863829202938119, \"R2\": 0.2826256992169514, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.2871789999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.247967976141752, \"RMSE\": 6.717580819449276, \"R2\": 0.4160344373982124, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.3361619999999999 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.525268599337025, \"RMSE\": 6.978492074792776, \"R2\": 0.493394283015475, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.3890969999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.7434869323510185, \"RMSE\": 7.161143757518859, \"R2\": 0.5698598432460567, \"Memory in Mb\": 0.0565834045410156, \"Time in s\": 0.4460369999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.817684977356876, \"RMSE\": 7.1877471099050325, \"R2\": 0.6451941261958376, \"Memory in Mb\": 
0.0565834045410156, \"Time in s\": 0.5072089999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.83667165494537, \"RMSE\": 7.176577259975889, \"R2\": 0.7186458480933114, \"Memory in Mb\": 0.0565834045410156, \"Time in s\": 0.5725709999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.073405719834179, \"RMSE\": 7.569308518085582, \"R2\": 0.7419802486075263, \"Memory in Mb\": 0.0217466354370117, \"Time in s\": 0.645323 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.671396913729996, \"RMSE\": 8.67042326781336, \"R2\": 0.7036008152378226, \"Memory in Mb\": 0.0280637741088867, \"Time in s\": 0.719465 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.870013976865108, \"RMSE\": 8.892004937785565, \"R2\": 0.7333469233470653, \"Memory in Mb\": 0.0333280563354492, \"Time in s\": 0.7950900000000001 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.098410134572789, \"RMSE\": 9.27860472778795, \"R2\": 0.7663748869623117, \"Memory in Mb\": 0.0380659103393554, \"Time in s\": 0.8723510000000001 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.1962339865774, \"RMSE\": 9.406595007903094, \"R2\": 0.7914570321252903, \"Memory in Mb\": 0.0417509078979492, \"Time in s\": 0.951359 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.851942913488504, \"RMSE\": 10.678395276366356, \"R2\": 0.7544562538164442, \"Memory in Mb\": 0.0418310165405273, \"Time in s\": 1.031981 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.351838545672251, \"RMSE\": 11.801369148896674, \"R2\": 0.7361015298851068, \"Memory in Mb\": 0.0418310165405273, \"Time in s\": 1.114342 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.621792166351879, \"RMSE\": 12.282711040561283, \"R2\": 0.7524071035845484, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.198422 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.637256630205925, \"RMSE\": 12.295347873811286, \"R2\": 0.7848229800793282, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.284253 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.1943326666584, \"RMSE\": 13.18128308543095, \"R2\": 0.7797471460356308, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.3717719999999998 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.301321372784988, \"RMSE\": 15.883856969554804, \"R2\": 0.7097662534619076, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.4609289999999997 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.759535973032726, \"RMSE\": 16.540475274696632, \"R2\": 0.7306639846914664, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.5518979999999998 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", 
\"MAE\": 9.98108531256273, \"RMSE\": 16.6656027575944, \"R2\": 0.7551596457293974, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.6445489999999998 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 10.172493780682656, \"RMSE\": 16.824682995393008, \"R2\": 0.773160093080841, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.7389499999999998 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.068151856114426, \"RMSE\": 18.263714825485387, \"R2\": 0.7404354867888504, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.835003 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.603116520774073, \"RMSE\": 19.443156920913136, \"R2\": 0.7295745843003554, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.932986 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.00507936887919, \"RMSE\": 20.0961554988217, \"R2\": 0.744401153020958, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 2.032719 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.159003512064782, \"RMSE\": 20.104597547074984, \"R2\": 0.7614813985176707, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 2.134109 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.058561054914293, \"RMSE\": 21.64678300128301, \"R2\": 0.7429728504217219, \"Memory in Mb\": 0.0393133163452148, \"Time in s\": 2.2395339999999995 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.849374886718222, \"RMSE\": 23.13707582414104, \"R2\": 0.7243608784812086, \"Memory in Mb\": 0.039839744567871, \"Time in s\": 2.346639 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.418764158229274, \"RMSE\": 24.09396728520024, \"R2\": 0.7343803671174814, \"Memory in Mb\": 0.0408926010131835, \"Time in s\": 2.455353 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.611969435637262, \"RMSE\": 24.1872515908579, \"R2\": 0.7512659254336986, \"Memory in Mb\": 0.0414190292358398, \"Time in s\": 2.5657939999999995 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 15.14954210400284, \"RMSE\": 24.823452151261105, \"R2\": 0.7491462990276416, \"Memory in Mb\": 0.0429983139038085, \"Time in s\": 2.6778409999999995 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 16.266748325298664, \"RMSE\": 26.99226997645693, \"R2\": 0.7214095667925529, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 2.791571 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.063164501315207, \"RMSE\": 28.33702908143248, \"R2\": 0.7288823142196059, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 2.9070249999999995 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.41028324926407, \"RMSE\": 28.63458736095403, \"R2\": 0.7383292654808864, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.0241479999999994 
}, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.881702409973563, \"RMSE\": 29.18189849457619, \"R2\": 0.7443486730566713, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.1430089999999997 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 18.783373559654446, \"RMSE\": 30.65392766804094, \"R2\": 0.7261055653266129, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.263499999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 19.52135237811833, \"RMSE\": 31.66784012367412, \"R2\": 0.7241496029986892, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.3857279999999994 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.32410387080197, \"RMSE\": 32.88418602247989, \"R2\": 0.7325694870351915, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.509655999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.50845806362208, \"RMSE\": 32.89095814819447, \"R2\": 0.7435070498169007, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.635277999999999 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 21.507291986413147, \"RMSE\": 34.52927015042095, \"R2\": 0.7260306568753008, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.762600999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 22.222091157093345, \"RMSE\": 35.46412346985515, \"R2\": 0.7236334646353288, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.891587999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 23.084218600655667, \"RMSE\": 36.66377836765904, \"R2\": 0.7267728639741885, \"Memory in Mb\": 0.044051170349121, \"Time in s\": 4.022359999999999 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.834704431652337, \"RMSE\": 13.708514217962266, \"R2\": -439.7934984576362, \"Memory in Mb\": 0.0508193969726562, \"Time in s\": 0.006949 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.4692310697037447, \"RMSE\": 9.813795721313518, \"R2\": -37.72035957928713, \"Memory in Mb\": 0.0739822387695312, \"Time in s\": 0.020525 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.530247618203559, \"RMSE\": 8.024836796214231, \"R2\": -33.90460110966681, \"Memory in Mb\": 0.0866165161132812, \"Time in s\": 0.041034 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1398752670733447, \"RMSE\": 6.982837000856316, \"R2\": -25.510487239912003, \"Memory in Mb\": 0.09661865234375, \"Time in s\": 0.06908 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2521629689485394, \"RMSE\": 6.362737158647257, \"R2\": -12.810573390910957, \"Memory in Mb\": 0.1060943603515625, \"Time in s\": 0.105159 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.275331183116589, 
\"RMSE\": 5.895687482983747, \"R2\": -9.059182991303912, \"Memory in Mb\": 0.1103057861328125, \"Time in s\": 0.149228 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.181766409647037, \"RMSE\": 5.493495699082884, \"R2\": -8.025069637302263, \"Memory in Mb\": 0.1124114990234375, \"Time in s\": 0.201546 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0641207293789914, \"RMSE\": 5.165105496730293, \"R2\": -6.03588310696345, \"Memory in Mb\": 0.1166229248046875, \"Time in s\": 0.262282 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9901037542149176, \"RMSE\": 4.906162642056599, \"R2\": -4.575276834209563, \"Memory in Mb\": 0.1187286376953125, \"Time in s\": 0.331582 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.854788525917255, \"RMSE\": 4.661016718308231, \"R2\": -4.0470005140641305, \"Memory in Mb\": 0.015085220336914, \"Time in s\": 0.422383 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.777996659033335, \"RMSE\": 4.4592908674997, \"R2\": -3.98319384245183, \"Memory in Mb\": 0.0312490463256835, \"Time in s\": 0.517121 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.696058551537428, \"RMSE\": 4.277699003809556, \"R2\": -3.620107687238352, \"Memory in Mb\": 0.0370397567749023, \"Time in s\": 0.616341 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6130985138696277, \"RMSE\": 4.114896288193841, \"R2\": -3.332738894340625, \"Memory in Mb\": 0.044569969177246, \"Time in s\": 0.720441 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5797289220452373, \"RMSE\": 3.98595914915012, \"R2\": -3.256494063385272, \"Memory in Mb\": 0.0575590133666992, \"Time in s\": 0.829564 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5530598651848762, \"RMSE\": 3.87273637004692, \"R2\": -2.951592153393709, \"Memory in Mb\": 0.0675611495971679, \"Time in s\": 0.944355 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.487708679701559, \"RMSE\": 3.753298690745279, \"R2\": -2.895390019001044, \"Memory in Mb\": 0.0754575729370117, \"Time in s\": 1.064913 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.447577018485449, \"RMSE\": 3.651821310152781, \"R2\": -2.8968902417113367, \"Memory in Mb\": 0.0807218551635742, \"Time in s\": 1.191641 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4679964351503354, \"RMSE\": 3.577735862687295, \"R2\": -2.771095017816483, \"Memory in Mb\": 0.0886182785034179, \"Time in s\": 1.324785 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.433746711521204, \"RMSE\": 3.490600620499155, \"R2\": -2.7138197592485107, \"Memory in Mb\": 0.0938825607299804, \"Time in s\": 1.4646529999999998 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.394545215153479, \"RMSE\": 3.4083536810761967, \"R2\": -2.640941921411436, \"Memory in Mb\": 0.1012525558471679, \"Time in s\": 1.611633 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3582303513295786, \"RMSE\": 3.3304244373469776, \"R2\": -2.5913988321456767, \"Memory in Mb\": 0.1054639816284179, \"Time in s\": 1.7657539999999998 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3237600646089562, \"RMSE\": 3.2576165394889824, \"R2\": -2.3739144098898306, \"Memory in Mb\": 0.1112546920776367, \"Time in s\": 1.927203 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.313374021763953, \"RMSE\": 3.19664903268704, \"R2\": -2.080839603370824, \"Memory in Mb\": 0.1196775436401367, \"Time in s\": 2.096272 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2871329178228548, \"RMSE\": 3.1331240379472574, \"R2\": -1.891520259695389, \"Memory in Mb\": 0.1275739669799804, \"Time in s\": 2.277254 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.257405891185914, \"RMSE\": 3.073015209583509, \"R2\": -1.7232796098685204, \"Memory in Mb\": 0.1323118209838867, \"Time in s\": 2.471873 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.236691492049667, \"RMSE\": 3.017934049641527, \"R2\": -1.6311150670478525, \"Memory in Mb\": 0.1375761032104492, \"Time in s\": 2.675336 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2120036461347103, \"RMSE\": 2.9640451937301795, \"R2\": -1.528689807762199, \"Memory in Mb\": 0.1396818161010742, \"Time in s\": 2.8858550000000003 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1973910792850353, \"RMSE\": 2.9159627023633448, \"R2\": -1.5056283897938432, \"Memory in Mb\": 0.1444196701049804, \"Time in s\": 3.1010690000000003 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1729976236433703, \"RMSE\": 2.867868059269123, \"R2\": -1.4835996265310456, \"Memory in Mb\": 0.1470518112182617, \"Time in s\": 3.321073 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.164383909889498, \"RMSE\": 2.8255860627341494, \"R2\": -1.384236602114556, \"Memory in Mb\": 0.1414899826049804, \"Time in s\": 3.5505340000000003 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1607847453289375, \"RMSE\": 2.7880287925830083, \"R2\": -1.2858933574415188, \"Memory in Mb\": 0.1441221237182617, \"Time in s\": 3.784549 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1488689392891114, \"RMSE\": 2.748475985188827, \"R2\": -1.179970998158682, \"Memory in Mb\": 0.1462278366088867, \"Time in s\": 4.023051000000001 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1353328006378431, \"RMSE\": 2.711500109160092, \"R2\": -1.1064542348150137, \"Memory in Mb\": 
0.0933332443237304, \"Time in s\": 4.270219000000001 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1237673891934872, \"RMSE\": 2.67586665558629, \"R2\": -1.083872122585975, \"Memory in Mb\": 0.1028089523315429, \"Time in s\": 4.520950000000001 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1197870211993892, \"RMSE\": 2.6450513004918434, \"R2\": -1.0896316212520398, \"Memory in Mb\": 0.1107053756713867, \"Time in s\": 4.77542 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0988128320741315, \"RMSE\": 2.6090976903580367, \"R2\": -1.077879402293885, \"Memory in Mb\": 0.1170225143432617, \"Time in s\": 5.033773 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0821696401958585, \"RMSE\": 2.5758102479785587, \"R2\": -1.0239793040320149, \"Memory in Mb\": 0.1207075119018554, \"Time in s\": 5.296116 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0646778366488154, \"RMSE\": 2.542912274197232, \"R2\": -0.9939800998628658, \"Memory in Mb\": 0.1238660812377929, \"Time in s\": 5.562507999999999 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0458773095514022, \"RMSE\": 2.5110610332394967, \"R2\": -0.953051985319172, \"Memory in Mb\": 0.1301832199096679, \"Time in s\": 5.833025999999999 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.037522387437363, \"RMSE\": 2.4829468635274075, \"R2\": -0.9268335078372076, \"Memory in Mb\": 0.1405134201049804, \"Time in s\": 6.107975999999999 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0344825476074344, \"RMSE\": 2.459713101244265, \"R2\": -0.9117084698305152, \"Memory in Mb\": 0.1468305587768554, \"Time in s\": 6.387556999999999 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0265033672562451, \"RMSE\": 2.433853565150633, \"R2\": -0.8891007921837151, \"Memory in Mb\": 0.1505155563354492, \"Time in s\": 6.671677999999999 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0098876748057108, \"RMSE\": 2.406249644034785, \"R2\": -0.8433117943327111, \"Memory in Mb\": 0.1520948410034179, \"Time in s\": 6.960477999999999 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9987418826957182, \"RMSE\": 2.3807044173500915, \"R2\": -0.795415858642577, \"Memory in Mb\": 0.1552534103393554, \"Time in s\": 7.253952999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9899512872354768, \"RMSE\": 2.356297962435036, \"R2\": -0.7662040947107138, \"Memory in Mb\": 0.1573591232299804, \"Time in s\": 7.552186999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9763510674237786, \"RMSE\": 2.3315103197053237, \"R2\": -0.7576521562914318, \"Memory in Mb\": 0.1254529953002929, \"Time in s\": 7.860606999999999 }, { \"step\": 940, \"track\": 
\"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9721072130212216, \"RMSE\": 2.310027291919755, \"R2\": -0.7400479483856388, \"Memory in Mb\": 0.1344251632690429, \"Time in s\": 8.173459999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9655343485152448, \"RMSE\": 2.289186508543074, \"R2\": -0.7266590967915565, \"Memory in Mb\": 0.1402158737182617, \"Time in s\": 8.490537999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9583856890611192, \"RMSE\": 2.2686834337047155, \"R2\": -0.7288044208040367, \"Memory in Mb\": 0.1444272994995117, \"Time in s\": 8.812014999999999 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9497447679766952, \"RMSE\": 2.248146643879841, \"R2\": -0.7262238291744263, \"Memory in Mb\": 0.1486387252807617, \"Time in s\": 9.137959 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.051220648038832, \"RMSE\": 17.336198122120386, \"R2\": -385.8701660091343, \"Memory in Mb\": 0.0232887268066406, \"Time in s\": 0.00403 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.498502947359929, \"RMSE\": 12.28528637536428, \"R2\": -158.84524831763767, \"Memory in Mb\": 0.0249290466308593, \"Time in s\": 0.011609 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4668695042339137, \"RMSE\": 10.074636808082968, \"R2\": -69.49212762837747, \"Memory in Mb\": 0.0301933288574218, \"Time in s\": 0.022078 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7637805804889557, \"RMSE\": 8.735764655686483, \"R2\": -59.08259408516962, \"Memory in Mb\": 0.0313072204589843, \"Time in s\": 0.035307 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.814517498310432, \"RMSE\": 8.074396776941786, \"R2\": -11.726997097138026, \"Memory in Mb\": 0.0381507873535156, \"Time in s\": 0.051286 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.396900059747575, \"RMSE\": 7.862006773633152, \"R2\": -4.201378762014764, \"Memory in Mb\": 0.0476264953613281, \"Time in s\": 0.070551 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.8844336568547537, \"RMSE\": 7.782255505653143, \"R2\": -2.415785129732385, \"Memory in Mb\": 0.0540046691894531, \"Time in s\": 0.093692 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.068768385552718, \"RMSE\": 7.555909217267645, \"R2\": -1.9194502155140076, \"Memory in Mb\": 0.0597953796386718, \"Time in s\": 0.121065 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.311602452908636, \"RMSE\": 7.487314706483316, \"R2\": -1.3976387620786477, \"Memory in Mb\": 0.0608482360839843, \"Time in s\": 0.152902 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 
4.261918758035323, \"RMSE\": 7.240982145259267, \"R2\": -0.5659557565320237, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.189312 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.32509570871032, \"RMSE\": 7.149348278127394, \"R2\": -0.1757051422939808, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.229979 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.243770455182887, \"RMSE\": 6.949556168474376, \"R2\": 0.0920549285716371, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.275055 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.119311205048765, \"RMSE\": 6.740083059431663, \"R2\": 0.3082592266545521, \"Memory in Mb\": 0.0241641998291015, \"Time in s\": 0.330922 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.094718549433554, \"RMSE\": 6.618738421062464, \"R2\": 0.4330929316851147, \"Memory in Mb\": 0.034926414489746, \"Time in s\": 0.389383 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.353591485820727, \"RMSE\": 6.858418841195889, \"R2\": 0.5106778054828556, \"Memory in Mb\": 0.041365623474121, \"Time in s\": 0.450937 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.494676115333661, \"RMSE\": 6.99651956882687, \"R2\": 0.5894091082089881, \"Memory in Mb\": 0.0471563339233398, \"Time in s\": 0.515937 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.531460188122701, \"RMSE\": 6.982633238942946, \"R2\": 0.6651551017011323, \"Memory in Mb\": 0.052016258239746, \"Time in s\": 0.584675 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.550856564096301, \"RMSE\": 6.948954565412159, \"R2\": 0.736210476532317, \"Memory in Mb\": 0.0546483993530273, \"Time in s\": 0.6572239999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.745146525796211, \"RMSE\": 7.286245359964537, \"R2\": 0.7609173164436883, \"Memory in Mb\": 0.0546483993530273, \"Time in s\": 0.733675 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.330595718754616, \"RMSE\": 8.515887777891804, \"R2\": 0.7140722769094752, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.814194 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.50563091305374, \"RMSE\": 8.701126762111178, \"R2\": 0.7446721431039482, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.898563 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.723610994733609, \"RMSE\": 9.068119167211083, \"R2\": 0.7768542529953268, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.986859 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.834317193911452, \"RMSE\": 9.203767847944404, \"R2\": 0.8003533769470821, \"Memory in Mb\": 
0.0552968978881835, \"Time in s\": 1.07894 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.561326226922799, \"RMSE\": 10.595386608942691, \"R2\": 0.7582588922727441, \"Memory in Mb\": 0.0553770065307617, \"Time in s\": 1.17475 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.055343281410319, \"RMSE\": 11.793355798881397, \"R2\": 0.7364597921881992, \"Memory in Mb\": 0.0553770065307617, \"Time in s\": 1.274735 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.331998951413002, \"RMSE\": 12.245186296589464, \"R2\": 0.7539176280650666, \"Memory in Mb\": 0.0539121627807617, \"Time in s\": 1.383231 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.416966166983629, \"RMSE\": 12.289761227218738, \"R2\": 0.7850184759487971, \"Memory in Mb\": 0.0544385910034179, \"Time in s\": 1.495361 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.99448083149163, \"RMSE\": 13.217085318753208, \"R2\": 0.7785490451915651, \"Memory in Mb\": 0.0545606613159179, \"Time in s\": 1.6112419999999998 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.157233410060112, \"RMSE\": 16.13339057164046, \"R2\": 0.7005755447430102, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.73087 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.47509121278654, \"RMSE\": 16.446724789755304, \"R2\": 0.7337084949314072, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.854306 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.757592549477597, \"RMSE\": 16.701217161288277, \"R2\": 0.7541120796137563, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.981549 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.93501095513177, \"RMSE\": 16.87017564150386, \"R2\": 0.7719317193583981, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 2.112498 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 10.85670423687554, \"RMSE\": 18.405576814105093, \"R2\": 0.7363875320655082, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 2.247206 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.59956477803306, \"RMSE\": 20.214372545093333, \"R2\": 0.7076961913412723, \"Memory in Mb\": 0.0658864974975586, \"Time in s\": 2.386792 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.011712885443345, \"RMSE\": 20.838356414394227, \"R2\": 0.7251727140820715, \"Memory in Mb\": 0.0713338851928711, \"Time in s\": 2.531165 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.02179706392092, \"RMSE\": 20.699504894468426, \"R2\": 0.7471567277697432, \"Memory in Mb\": 0.0781774520874023, \"Time in s\": 2.680539 }, { \"step\": 407, \"track\": \"Regression\", 
\"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.867904342374958, \"RMSE\": 22.04935022682606, \"R2\": 0.733324041761514, \"Memory in Mb\": 0.0825719833374023, \"Time in s\": 2.8351709999999994 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.7726214629037, \"RMSE\": 23.76360253855293, \"R2\": 0.7092307493255587, \"Memory in Mb\": 0.0846776962280273, \"Time in s\": 2.9951159999999994 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.320664029675132, \"RMSE\": 24.67720892632965, \"R2\": 0.7213650338700139, \"Memory in Mb\": 0.0656805038452148, \"Time in s\": 3.164622 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.560745017781516, \"RMSE\": 24.854467305977835, \"R2\": 0.7373537773889289, \"Memory in Mb\": 0.0706624984741211, \"Time in s\": 3.338776 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 15.05271815758178, \"RMSE\": 25.416929688531035, \"R2\": 0.7370081245929037, \"Memory in Mb\": 0.0787420272827148, \"Time in s\": 3.5178329999999995 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 16.182360709465044, \"RMSE\": 27.521690548068367, \"R2\": 0.7103739673524856, \"Memory in Mb\": 0.0857076644897461, \"Time in s\": 3.702085 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.028395162723214, \"RMSE\": 29.038217655150596, \"R2\": 0.7152989103492285, \"Memory in Mb\": 0.0888662338256836, \"Time in s\": 3.891670999999999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.39410374569335, \"RMSE\": 29.368989764604034, \"R2\": 0.7247347991767862, \"Memory in Mb\": 0.0889272689819336, \"Time in s\": 4.084988999999999 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.903720470221085, \"RMSE\": 29.931789274576047, \"R2\": 0.7310408495158558, \"Memory in Mb\": 0.0889272689819336, \"Time in s\": 4.281388999999999 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 18.828904303353827, \"RMSE\": 31.360943457558232, \"R2\": 0.7133254165496098, \"Memory in Mb\": 0.0895147323608398, \"Time in s\": 4.480870999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 19.65231385288099, \"RMSE\": 32.53535290181733, \"R2\": 0.7088292337107098, \"Memory in Mb\": 0.0743856430053711, \"Time in s\": 4.686204999999998 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.485248099981963, \"RMSE\": 33.800991401884744, \"R2\": 0.7174497851371298, \"Memory in Mb\": 0.0787191390991211, \"Time in s\": 4.894319999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.62693587606907, \"RMSE\": 33.76714158926765, \"R2\": 0.7296595823775684, \"Memory in Mb\": 0.0872030258178711, \"Time in s\": 5.105511999999998 }, { \"step\": 550, \"track\": \"Regression\", 
\"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 21.61100465238265, \"RMSE\": 35.363345192969206, \"R2\": 0.7126350131259431, \"Memory in Mb\": 0.0935201644897461, \"Time in s\": 5.319765999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 22.391562087266266, \"RMSE\": 36.4058649659661, \"R2\": 0.708760885936744, \"Memory in Mb\": 0.0934362411499023, \"Time in s\": 5.537231999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 23.25574522992599, \"RMSE\": 37.57896806973795, \"R2\": 0.7129622004044582, \"Memory in Mb\": 0.0946111679077148, \"Time in s\": 5.757816999999998 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.828377634536296, \"RMSE\": 13.70786256219322, \"R2\": -439.7515918302183, \"Memory in Mb\": 0.0575942993164062, \"Time in s\": 0.005178 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.453811275213839, \"RMSE\": 9.811073218407971, \"R2\": -37.69887927291551, \"Memory in Mb\": 0.0808181762695312, \"Time in s\": 0.01451 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5116544078850294, \"RMSE\": 8.021960641037959, \"R2\": -33.879585508404254, \"Memory in Mb\": 0.0934524536132812, \"Time in s\": 0.027873 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1224425015381523, \"RMSE\": 6.9797990571526345, \"R2\": -25.487425023640156, \"Memory in Mb\": 0.103515625, \"Time in s\": 0.045557 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.246653919301699, \"RMSE\": 6.363694444016854, \"R2\": -12.814729355257526, \"Memory in Mb\": 0.1129913330078125, \"Time in s\": 0.06794 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.270681160376927, \"RMSE\": 5.896666779393501, \"R2\": -9.06252500695684, \"Memory in Mb\": 0.1172027587890625, \"Time in s\": 0.095003 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1411489845018936, \"RMSE\": 5.486121567062232, \"R2\": -8.000856498367144, \"Memory in Mb\": 0.1193084716796875, \"Time in s\": 0.126859 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9595309296437795, \"RMSE\": 5.145701533389061, \"R2\": -5.983118424699933, \"Memory in Mb\": 0.048110008239746, \"Time in s\": 0.170894 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8606850760789413, \"RMSE\": 4.874784956472401, \"R2\": -4.504190782470528, \"Memory in Mb\": 0.0645513534545898, \"Time in s\": 0.218268 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.753768292507887, \"RMSE\": 4.635064394721464, \"R2\": -3.990954055000616, \"Memory in Mb\": 0.0751142501831054, \"Time in s\": 0.2693379999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"TrumpApproval\", \"MAE\": 1.6442100676088158, \"RMSE\": 4.426753978888705, \"R2\": -3.910740120483753, \"Memory in Mb\": 0.0809926986694336, \"Time in s\": 0.324221 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5586094458315778, \"RMSE\": 4.243291414416048, \"R2\": -3.546083102970604, \"Memory in Mb\": 0.0831594467163086, \"Time in s\": 0.383071 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4757349309283123, \"RMSE\": 4.079934399827006, \"R2\": -3.259426129696055, \"Memory in Mb\": 0.0869779586791992, \"Time in s\": 0.445946 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.429182741517093, \"RMSE\": 3.94384178403937, \"R2\": -3.1670173911200505, \"Memory in Mb\": 0.0965147018432617, \"Time in s\": 0.513038 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.395554812880875, \"RMSE\": 3.827170050036498, \"R2\": -2.859150937529973, \"Memory in Mb\": 0.1050596237182617, \"Time in s\": 0.584571 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.342072644309252, \"RMSE\": 3.7085248950294263, \"R2\": -2.8030066952692625, \"Memory in Mb\": 0.1113767623901367, \"Time in s\": 0.660678 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2981450416106572, \"RMSE\": 3.6023493777501, \"R2\": -2.7920215666096264, \"Memory in Mb\": 0.0969266891479492, \"Time in s\": 0.7468359999999999 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3017629599133824, \"RMSE\": 3.52722030011446, \"R2\": -2.665355451896766, \"Memory in Mb\": 0.1048231124877929, \"Time in s\": 0.837109 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2791887566109008, \"RMSE\": 3.4421979658375847, \"R2\": -2.6115379824259644, \"Memory in Mb\": 0.1101484298706054, \"Time in s\": 0.931726 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2309239845123985, \"RMSE\": 3.35653802988408, \"R2\": -2.531080237482485, \"Memory in Mb\": 0.1175184249877929, \"Time in s\": 1.030965 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.204488166009232, \"RMSE\": 3.279580566778272, \"R2\": -2.4825797995498564, \"Memory in Mb\": 0.1217298507690429, \"Time in s\": 1.13497 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1805943216757937, \"RMSE\": 3.209931042788128, \"R2\": -2.2758615914195226, \"Memory in Mb\": 0.1275205612182617, \"Time in s\": 1.243696 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1747748413206256, \"RMSE\": 3.150463961675437, \"R2\": -1.9924589881120156, \"Memory in Mb\": 0.1354780197143554, \"Time in s\": 1.3572460000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1643181646721343, \"RMSE\": 
3.0907225956471227, \"R2\": -1.8137863387673288, \"Memory in Mb\": 0.1433744430541992, \"Time in s\": 1.47581 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1467427258000205, \"RMSE\": 3.033773569773335, \"R2\": -1.6541724792729693, \"Memory in Mb\": 0.1362333297729492, \"Time in s\": 1.6042770000000002 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1310531754619513, \"RMSE\": 2.979836071514613, \"R2\": -1.5651047078316291, \"Memory in Mb\": 0.1415586471557617, \"Time in s\": 1.7377410000000002 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1142715036195536, \"RMSE\": 2.927551584361061, \"R2\": -1.466806182052764, \"Memory in Mb\": 0.1436643600463867, \"Time in s\": 1.876231 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1052485405576076, \"RMSE\": 2.88137538641146, \"R2\": -1.4465405352520455, \"Memory in Mb\": 0.1484022140502929, \"Time in s\": 2.0198690000000004 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0859142446379713, \"RMSE\": 2.834429565013283, \"R2\": -1.4260211929714153, \"Memory in Mb\": 0.1510343551635742, \"Time in s\": 2.168629 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0769717813954385, \"RMSE\": 2.7922355683541964, \"R2\": -1.3282862925750138, \"Memory in Mb\": 0.1547193527221679, \"Time in s\": 2.322692 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0766885404316004, \"RMSE\": 2.756385805344716, \"R2\": -1.234299900153052, \"Memory in Mb\": 0.1578779220581054, \"Time in s\": 2.481953 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.066276571170311, \"RMSE\": 2.7176435544384367, \"R2\": -1.1313354614936588, \"Memory in Mb\": 0.1599836349487304, \"Time in s\": 2.646434 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0564634149761172, \"RMSE\": 2.6824145051502803, \"R2\": -1.061505763110988, \"Memory in Mb\": 0.1636686325073242, \"Time in s\": 2.816279 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0488950225738152, \"RMSE\": 2.6482621618516, \"R2\": -1.0410990478472515, \"Memory in Mb\": 0.1663007736206054, \"Time in s\": 2.991546 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0494335446356584, \"RMSE\": 2.619695292588707, \"R2\": -1.0497603683353351, \"Memory in Mb\": 0.1547193527221679, \"Time in s\": 3.177748 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0311572549278785, \"RMSE\": 2.584165722213007, \"R2\": -1.038357615399126, \"Memory in Mb\": 0.1594572067260742, \"Time in s\": 3.369155 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0166711191684197, \"RMSE\": 2.551525288520744, \"R2\": 
-0.9859947129346353, \"Memory in Mb\": 0.1632032394409179, \"Time in s\": 3.565871 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.998612246058934, \"RMSE\": 2.518502326974768, \"R2\": -0.9558825700512557, \"Memory in Mb\": 0.1647825241088867, \"Time in s\": 3.768021 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9842422617410423, \"RMSE\": 2.4876653972852822, \"R2\": -0.916828228451222, \"Memory in Mb\": 0.1695814132690429, \"Time in s\": 3.975687 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9822601758268574, \"RMSE\": 2.462050972175237, \"R2\": -0.8945384291924348, \"Memory in Mb\": 0.1616849899291992, \"Time in s\": 4.194112 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9867607749501652, \"RMSE\": 2.442599600947298, \"R2\": -0.8851995145588711, \"Memory in Mb\": 0.1643171310424804, \"Time in s\": 4.4181 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9793985197413392, \"RMSE\": 2.417271328475225, \"R2\": -0.8634469862274994, \"Memory in Mb\": 0.1680021286010742, \"Time in s\": 4.647504 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9647458553132984, \"RMSE\": 2.3903176435066817, \"R2\": -0.8189831286734526, \"Memory in Mb\": 0.1701078414916992, \"Time in s\": 4.882465 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9550000119490066, \"RMSE\": 2.3652388018081414, \"R2\": -0.7721647401587992, \"Memory in Mb\": 0.1727399826049804, \"Time in s\": 5.123038 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.945028697359752, \"RMSE\": 2.34076619049233, \"R2\": -0.7429966155689833, \"Memory in Mb\": 0.1115369796752929, \"Time in s\": 5.374164 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9340382087332012, \"RMSE\": 2.316653968016665, \"R2\": -0.7353240501222194, \"Memory in Mb\": 0.1163969039916992, \"Time in s\": 5.629799 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.93159447743873, \"RMSE\": 2.295899595899596, \"R2\": -0.7188294143518761, \"Memory in Mb\": 0.1223096847534179, \"Time in s\": 5.8899680000000005 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9266882702688716, \"RMSE\": 2.2758824318695603, \"R2\": -0.7066477496763941, \"Memory in Mb\": 0.1296796798706054, \"Time in s\": 6.154803 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9199947893356224, \"RMSE\": 2.255690063945526, \"R2\": -0.7090584581073034, \"Memory in Mb\": 0.1349439620971679, \"Time in s\": 6.424549 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.910675347240908, \"RMSE\": 2.2342958873570486, \"R2\": -0.7050189374098543, \"Memory in Mb\": 0.1382246017456054, 
\"Time in s\": 6.699174 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0096149444580078, \"Time in s\": 0.001833 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0126094818115234, \"Time in s\": 0.00539 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.015787124633789, \"Time in s\": 0.009921 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0188732147216796, \"Time in s\": 0.015497 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0218257904052734, \"Time in s\": 0.022338 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.0246181488037109, \"Time in s\": 0.030239 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 43.50649350649351, \"RMSE\": 43.70978671356627, \"R2\": -106.75487995129542, \"Memory in Mb\": 0.0275020599365234, \"Time in s\": 0.039152 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 44.21590909090909, \"RMSE\": 44.43649707984724, \"R2\": -99.97346126163, \"Memory in Mb\": 0.0300197601318359, \"Time in s\": 0.049123 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 45.05050505050505, \"RMSE\": 45.309262771858165, \"R2\": -86.8022342468144, \"Memory in Mb\": 0.0329036712646484, \"Time in s\": 0.060256 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16363636363636, \"RMSE\": 46.52487115902242, \"R2\": -63.64797006437341, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.074892 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 47.21487603305785, \"RMSE\": 47.67304278378361, \"R2\": -51.27707184490422, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.095588 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 48.29545454545455, \"RMSE\": 48.843054157105485, \"R2\": -43.84882422437649, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.122121 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 49.44055944055945, \"RMSE\": 50.100318941519305, \"R2\": -37.220279564063546, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.154488 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Stochastic 
Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 50.532467532467535, \"RMSE\": 51.29137544271156, \"R2\": -33.04474826644667, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.192654 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.690909090909095, \"RMSE\": 52.61253451297311, \"R2\": -27.795548438273773, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.236647 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.00568181818182, \"RMSE\": 54.11860921749895, \"R2\": -23.566226925646237, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.286373 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 54.41176470588235, \"RMSE\": 55.733754017636336, \"R2\": -20.33250305682894, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.341821 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.02525252525252, \"RMSE\": 57.635786091488654, \"R2\": -17.146924852486976, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.403009 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 55.16354936929098, \"RMSE\": 57.0482200725598, \"R2\": -13.656313160472004, \"Memory in Mb\": 0.6838865280151367, \"Time in s\": 0.4890500000000001 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.62203856749311, \"RMSE\": 56.03531795068661, \"R2\": -11.37998411824978, \"Memory in Mb\": 0.6869077682495117, \"Time in s\": 0.5845720000000001 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.77279286370195, \"RMSE\": 55.29408706815337, \"R2\": -9.311090357596036, \"Memory in Mb\": 0.6899290084838867, \"Time in s\": 0.6897040000000001 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.49661908339594, \"RMSE\": 55.0071045368674, \"R2\": -7.210918602421254, \"Memory in Mb\": 0.6929502487182617, \"Time in s\": 0.8041680000000001 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.25631812193077, \"RMSE\": 54.71344660515688, \"R2\": -6.055353919833875, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 0.92814 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.62511478420569, \"RMSE\": 54.312843786153664, \"R2\": -5.352168023774992, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.061635 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.4425344352617, \"RMSE\": 54.29364548356293, \"R2\": -4.585603291722447, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.204612 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.75651621106165, \"RMSE\": 54.635705044608144, \"R2\": -3.8989478253777694, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.357118 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", 
\"MAE\": 52.373839404142416, \"RMSE\": 55.25476711535166, \"R2\": -3.3456400671942, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.518976 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.87239275875638, \"RMSE\": 55.86677247417265, \"R2\": -2.9565197175813718, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.690095 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.69554478958866, \"RMSE\": 56.2770501442128, \"R2\": -2.6433309475704183, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.870449 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.85316804407712, \"RMSE\": 57.75044402630399, \"R2\": -2.2832890424968197, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.06005 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 54.90678041411178, \"RMSE\": 59.01114057562677, \"R2\": -2.0697921090482247, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.258925 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.00533746556472, \"RMSE\": 60.30224520856101, \"R2\": -1.9140207825503284, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.467073 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 55.99599298772852, \"RMSE\": 60.54917173074773, \"R2\": -1.852879941931207, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.684461 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.87222492302705, \"RMSE\": 61.81275171085535, \"R2\": -1.7331917323651345, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.911073 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 58.41786698150333, \"RMSE\": 63.95254893573906, \"R2\": -1.588502821427925, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 3.146928 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 59.7033976124885, \"RMSE\": 65.46926983257002, \"R2\": -1.5293357430909813, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 3.392032 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.057805647389294, \"RMSE\": 66.17359973042984, \"R2\": -1.4019380007417157, \"Memory in Mb\": 1.1097631454467771, \"Time in s\": 3.672033 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 59.7070864579051, \"RMSE\": 66.11592086962122, \"R2\": -1.2507954049688483, \"Memory in Mb\": 1.1127843856811523, \"Time in s\": 3.965768 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.122823673891816, \"RMSE\": 66.73609937588846, \"R2\": -1.0378169857688957, \"Memory in Mb\": 1.1158056259155271, \"Time in s\": 4.272702 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.39504675635191, \"RMSE\": 66.96100690444877, \"R2\": -0.906365593827489, 
\"Memory in Mb\": 1.1188268661499023, \"Time in s\": 4.593074 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.27126048587789, \"RMSE\": 66.93502892662679, \"R2\": -0.8239085862185902, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 4.926735 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.340686610373176, \"RMSE\": 67.43825007380137, \"R2\": -0.7390015352251049, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 5.273856 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 61.40703262301831, \"RMSE\": 69.11306667757516, \"R2\": -0.6127592621572406, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 5.634397 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 61.95796621360106, \"RMSE\": 69.71422620021941, \"R2\": -0.5510154280248158, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.008344 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 62.59018166487368, \"RMSE\": 70.55352405729404, \"R2\": -0.4943708535906215, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.395653 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 62.49664579133251, \"RMSE\": 70.88193125644693, \"R2\": -0.4644752452013045, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.796304 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 63.25224079915844, \"RMSE\": 71.92080214464903, \"R2\": -0.4228062717918979, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 7.21023 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 64.80783657170488, \"RMSE\": 74.3681944005728, \"R2\": -0.367764222300833, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 7.637592 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 65.59959781369417, \"RMSE\": 75.30113885843834, \"R2\": -0.3443906138479853, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 8.078512 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 65.79684627343133, \"RMSE\": 76.01328745307667, \"R2\": -0.3277190973108916, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 8.532886 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 66.6512855136148, \"RMSE\": 77.20436469287773, \"R2\": -0.3097569166669509, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 9.000807 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 68.11975592628174, \"RMSE\": 79.56492566870935, \"R2\": -0.2867456678376987, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 9.482145 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 43.8732195, \"RMSE\": 43.87807788634269, \"R2\": -4514.954899312423, \"Memory in Mb\": 0.0199413299560546, \"Time in s\": 0.002755 }, { \"step\": 40, \"track\": 
\"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.4932955, \"RMSE\": 42.52255283421693, \"R2\": -725.9491167623446, \"Memory in Mb\": 0.0317363739013671, \"Time in s\": 0.008058 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.2167785, \"RMSE\": 42.2386240157387, \"R2\": -966.0073736019044, \"Memory in Mb\": 0.0438976287841796, \"Time in s\": 0.015549 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.975705625, \"RMSE\": 41.99760868559829, \"R2\": -957.9655948743646, \"Memory in Mb\": 0.0562419891357421, \"Time in s\": 0.025294 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.37550450000001, \"RMSE\": 41.410913785433536, \"R2\": -583.9966399141301, \"Memory in Mb\": 0.5381031036376953, \"Time in s\": 0.041246 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.936110000000006, \"RMSE\": 40.97829382197767, \"R2\": -484.9611418859003, \"Memory in Mb\": 0.5386066436767578, \"Time in s\": 0.070023 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.6885472857143, \"RMSE\": 40.72961738075088, \"R2\": -495.1050461477588, \"Memory in Mb\": 0.5391101837158203, \"Time in s\": 0.110787 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.35105437500001, \"RMSE\": 40.39801158334292, \"R2\": -429.4078677932073, \"Memory in Mb\": 0.5393619537353516, \"Time in s\": 0.163463 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00981655555555, \"RMSE\": 40.06373388340122, \"R2\": -370.7794659133543, \"Memory in Mb\": 0.5396137237548828, \"Time in s\": 0.227995 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.80633095, \"RMSE\": 39.860362966711, \"R2\": -368.1089073295326, \"Memory in Mb\": 0.5077581405639648, \"Time in s\": 0.320041 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 36.497516001377406, \"RMSE\": 38.01945344470104, \"R2\": -361.2329206514933, \"Memory in Mb\": 1.3602590560913086, \"Time in s\": 0.441408 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 33.64243104419191, \"RMSE\": 36.40668421494773, \"R2\": -333.65237138497804, \"Memory in Mb\": 1.360762596130371, \"Time in s\": 0.581306 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.222114965034955, \"RMSE\": 34.98371838354962, \"R2\": -312.16748668977897, \"Memory in Mb\": 1.3610143661499023, \"Time in s\": 0.739627 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.18205946861472, \"RMSE\": 33.71869814960704, \"R2\": -303.5986275675674, \"Memory in Mb\": 1.361769676208496, \"Time in s\": 0.915776 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 
27.34275770505051, \"RMSE\": 32.57805191350732, \"R2\": -278.63174197976707, \"Memory in Mb\": 1.3620214462280271, \"Time in s\": 1.109879 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 25.81388747443183, \"RMSE\": 31.5521424826706, \"R2\": -274.2849072221064, \"Memory in Mb\": 1.3630285263061523, \"Time in s\": 1.321838 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 24.51835124153299, \"RMSE\": 30.62414457186519, \"R2\": -273.0482727941538, \"Memory in Mb\": 1.3640356063842771, \"Time in s\": 1.551694 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.451930423400693, \"RMSE\": 29.78792492645533, \"R2\": -260.4155562259403, \"Memory in Mb\": 1.3660497665405271, \"Time in s\": 1.799643 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 22.46844053349284, \"RMSE\": 29.014219480552867, \"R2\": -255.5915105297988, \"Memory in Mb\": 1.3665533065795898, \"Time in s\": 2.065559 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 21.59490700757577, \"RMSE\": 28.301677882839343, \"R2\": -250.0434007116766, \"Memory in Mb\": 0.510127067565918, \"Time in s\": 2.355987 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 20.62268781294523, \"RMSE\": 27.62086591367872, \"R2\": -246.0239415518119, \"Memory in Mb\": 1.3623762130737305, \"Time in s\": 2.674434 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.786863931462925, \"RMSE\": 26.990398924900397, \"R2\": -230.60756767519212, \"Memory in Mb\": 1.3643903732299805, \"Time in s\": 3.010878 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.05732899619648, \"RMSE\": 26.404670160589287, \"R2\": -209.2038511633616, \"Memory in Mb\": 1.3666563034057615, \"Time in s\": 3.365293 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 18.376512097202227, \"RMSE\": 25.854792215140314, \"R2\": -195.90337768575387, \"Memory in Mb\": 1.3701810836791992, \"Time in s\": 3.737716 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 17.755044410127518, \"RMSE\": 25.338820973360427, \"R2\": -184.1550753065148, \"Memory in Mb\": 1.3716917037963867, \"Time in s\": 4.128280999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 17.16611419898163, \"RMSE\": 24.851444862058347, \"R2\": -177.4118263333629, \"Memory in Mb\": 1.3737058639526367, \"Time in s\": 4.537221 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 16.628565596068775, \"RMSE\": 24.392285078947275, \"R2\": -170.25012213753183, \"Memory in Mb\": 1.3747129440307615, \"Time in s\": 4.964375 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 16.091244232649693, \"RMSE\": 23.955027361350904, \"R2\": 
-168.10096043791202, \"Memory in Mb\": 1.3752164840698242, \"Time in s\": 5.410107999999999 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 15.590768135673304, \"RMSE\": 23.54051091957351, \"R2\": -166.33817208986073, \"Memory in Mb\": 1.3764753341674805, \"Time in s\": 5.874175999999999 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 15.168708628495342, \"RMSE\": 23.15108754841241, \"R2\": -159.05714501634571, \"Memory in Mb\": 0.5124959945678711, \"Time in s\": 6.365194 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.742446374247312, \"RMSE\": 22.77953961802373, \"R2\": -151.59887848495535, \"Memory in Mb\": 3.064208030700684, \"Time in s\": 6.921285 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.319364852585176, \"RMSE\": 22.42187566882095, \"R2\": -144.08105420081068, \"Memory in Mb\": 3.0679845809936523, \"Time in s\": 7.51197 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.916412195872256, \"RMSE\": 22.080274918425697, \"R2\": -138.68241285181185, \"Memory in Mb\": 3.0712575912475586, \"Time in s\": 8.136981 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.515604789075644, \"RMSE\": 21.753254558457893, \"R2\": -136.71797028279042, \"Memory in Mb\": 3.074782371520996, \"Time in s\": 8.796442 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.16391092204058, \"RMSE\": 21.44141764506316, \"R2\": -136.3120101768532, \"Memory in Mb\": 3.0773000717163086, \"Time in s\": 9.490636 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.828283113852926, \"RMSE\": 21.142484202016185, \"R2\": -135.44313416922282, \"Memory in Mb\": 3.078558921813965, \"Time in s\": 10.219341 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.50446646701278, \"RMSE\": 20.855361315179096, \"R2\": -131.6825380828392, \"Memory in Mb\": 3.0800695419311523, \"Time in s\": 10.982324 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.187542748969031, \"RMSE\": 20.57929219886472, \"R2\": -129.592708960364, \"Memory in Mb\": 3.0813283920288086, \"Time in s\": 11.779656 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.899403743710543, \"RMSE\": 20.31464229706916, \"R2\": -126.82553676745258, \"Memory in Mb\": 3.08359432220459, \"Time in s\": 12.611571 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.634366305883283, \"RMSE\": 20.06137952581079, \"R2\": -124.7856004590591, \"Memory in Mb\": 3.084601402282715, \"Time in s\": 13.493909000000002 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.363415331478278, \"RMSE\": 19.815492221289517, \"R2\": -123.0687724200615, \"Memory in 
Mb\": 3.08560848236084, \"Time in s\": 14.412321000000002 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.106640469158773, \"RMSE\": 19.57848368678801, \"R2\": -121.2430978899656, \"Memory in Mb\": 3.086615562438965, \"Time in s\": 15.363464000000002 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.873909665943762, \"RMSE\": 19.35022618912736, \"R2\": -118.20364312373844, \"Memory in Mb\": 3.087119102478028, \"Time in s\": 16.347229000000002 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.65545006969969, \"RMSE\": 19.130035299019603, \"R2\": -114.92727947355436, \"Memory in Mb\": 3.0873708724975586, \"Time in s\": 17.36361 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.439309697188907, \"RMSE\": 18.916827199314994, \"R2\": -112.83532852765144, \"Memory in Mb\": 3.08762264251709, \"Time in s\": 18.412326 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.21789524284777, \"RMSE\": 18.710158789526105, \"R2\": -112.19133803320568, \"Memory in Mb\": 3.087874412536621, \"Time in s\": 19.493438 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.012578535125469, \"RMSE\": 18.510293787577226, \"R2\": -110.72583714230213, \"Memory in Mb\": 3.077906608581543, \"Time in s\": 20.736809 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.811853150109153, \"RMSE\": 18.316579311485903, \"R2\": -109.54344305213982, \"Memory in Mb\": 3.0804243087768555, \"Time in s\": 22.013587 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.61909067795052, \"RMSE\": 18.12881604876013, \"R2\": -109.39183420714345, \"Memory in Mb\": 3.080927848815918, \"Time in s\": 23.322572 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.438738635632271, \"RMSE\": 17.946847607318464, \"R2\": -109.00797869183796, \"Memory in Mb\": 3.082438468933105, \"Time in s\": 24.663779 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.837563210503649, \"RMSE\": 16.830121687224917, \"R2\": -363.61289911513376, \"Memory in Mb\": 0.1506233215332031, \"Time in s\": 0.018991 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.3557641651310055, \"RMSE\": 11.925612892987612, \"R2\": -149.62275175212707, \"Memory in Mb\": 0.1761512756347656, \"Time in s\": 0.051966 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.371112580593177, \"RMSE\": 9.780386843070694, \"R2\": -65.43453306461763, \"Memory in Mb\": 0.2142868041992187, \"Time in s\": 0.1028409999999999 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.695097509519297, \"RMSE\": 8.482165721989492, \"R2\": -55.64483692929184, \"Memory in Mb\": 0.2312774658203125, \"Time in s\": 
0.1714339999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.750371500828058, \"RMSE\": 7.825470627419848, \"R2\": -10.954370249441634, \"Memory in Mb\": 0.2869682312011719, \"Time in s\": 0.2559499999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.874973614360605, \"RMSE\": 7.312672972792191, \"R2\": -3.4999113348114523, \"Memory in Mb\": 0.3332901000976562, \"Time in s\": 0.3603679999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.049190601733834, \"RMSE\": 7.064366487423796, \"R2\": -1.814660484448317, \"Memory in Mb\": 0.3119354248046875, \"Time in s\": 0.4921819999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.9760015514160614, \"RMSE\": 6.690266116634344, \"R2\": -1.2888345310585096, \"Memory in Mb\": 0.3463325500488281, \"Time in s\": 0.6436869999999999 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4458208345296213, \"RMSE\": 6.801756467406213, \"R2\": -0.9786716534372656, \"Memory in Mb\": 0.3914375305175781, \"Time in s\": 0.8148389999999999 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.8036130411377513, \"RMSE\": 6.901055458118455, \"R2\": -0.4223797783359673, \"Memory in Mb\": 0.4219093322753906, \"Time in s\": 1.008913 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.044286594485068, \"RMSE\": 6.961321070951229, \"R2\": -0.1146764842737158, \"Memory in Mb\": 0.4422340393066406, \"Time in s\": 1.227422 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.278051046290026, \"RMSE\": 6.992189163715883, \"R2\": 0.0808809347253165, \"Memory in Mb\": 0.4695549011230469, \"Time in s\": 1.467051 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.514698307868387, \"RMSE\": 7.09845673012605, \"R2\": 0.232743181062298, \"Memory in Mb\": 0.5039863586425781, \"Time in s\": 1.72858 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.711682816733759, \"RMSE\": 7.225211881769595, \"R2\": 0.3244420464758215, \"Memory in Mb\": 0.5465545654296875, \"Time in s\": 2.00923 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 5.098965674365188, \"RMSE\": 7.74448667501397, \"R2\": 0.3760752970367085, \"Memory in Mb\": 0.5543212890625, \"Time in s\": 2.317031 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 5.613573109580669, \"RMSE\": 8.405070008975855, \"R2\": 0.4074460804745814, \"Memory in Mb\": 0.5778732299804688, \"Time in s\": 2.650775 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.0300184245211925, \"RMSE\": 8.816070592615688, \"R2\": 0.4662285490406689, \"Memory in Mb\": 0.597381591796875, \"Time in s\": 3.008386 }, { \"step\": 198, \"track\": \"Regression\", \"model\": 
\"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.135166503700917, \"RMSE\": 8.873071763991604, \"R2\": 0.5699028278126741, \"Memory in Mb\": 0.6156463623046875, \"Time in s\": 3.389767 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.663870316688535, \"RMSE\": 9.680163486851024, \"R2\": 0.5780063435312484, \"Memory in Mb\": 0.6372909545898438, \"Time in s\": 3.797698 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.114976257039628, \"RMSE\": 10.70304519196575, \"R2\": 0.548340525531499, \"Memory in Mb\": 0.6516532897949219, \"Time in s\": 4.233731 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.558935022860611, \"RMSE\": 11.205896813348822, \"R2\": 0.5765126454701623, \"Memory in Mb\": 0.6456108093261719, \"Time in s\": 4.696369 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.8784966144348445, \"RMSE\": 11.546829616877767, \"R2\": 0.6381907286214681, \"Memory in Mb\": 0.6543197631835938, \"Time in s\": 5.184542 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 8.045465222989945, \"RMSE\": 11.7730800696534, \"R2\": 0.6733287963504477, \"Memory in Mb\": 0.6542396545410156, \"Time in s\": 5.698149999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 8.55861765945315, \"RMSE\": 12.698578392308455, \"R2\": 0.6527621165047097, \"Memory in Mb\": 0.7007179260253906, \"Time in s\": 6.239901999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.012932654808608, \"RMSE\": 13.883768164483952, \"R2\": 0.6347528903285174, \"Memory in Mb\": 0.7182159423828125, \"Time in s\": 6.810226999999999 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.46045520422096, \"RMSE\": 14.421722772543973, \"R2\": 0.6586625054492083, \"Memory in Mb\": 0.7397651672363281, \"Time in s\": 7.406784999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.467084537445258, \"RMSE\": 14.393153360282469, \"R2\": 0.7051330126585751, \"Memory in Mb\": 0.6962127685546875, \"Time in s\": 8.032812 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.993822911601686, \"RMSE\": 15.306673373378164, \"R2\": 0.7029922374440833, \"Memory in Mb\": 0.712249755859375, \"Time in s\": 8.684709 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 10.906399564516825, \"RMSE\": 17.709412566907307, \"R2\": 0.6392184800849823, \"Memory in Mb\": 0.7230567932128906, \"Time in s\": 9.367603 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 11.354718196032138, \"RMSE\": 18.25739128586918, \"R2\": 0.6718473555535345, \"Memory in Mb\": 0.748199462890625, \"Time in s\": 10.07931 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 
11.709200775641314, \"RMSE\": 18.709755653596343, \"R2\": 0.691413317744465, \"Memory in Mb\": 0.7508468627929688, \"Time in s\": 10.818764 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 12.007404346564588, \"RMSE\": 19.02446579939404, \"R2\": 0.7099648583835785, \"Memory in Mb\": 0.7794418334960938, \"Time in s\": 11.583692 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 12.612646669962947, \"RMSE\": 20.078004824306745, \"R2\": 0.6863045708062432, \"Memory in Mb\": 0.8219375610351562, \"Time in s\": 12.374611 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 13.305874945245664, \"RMSE\": 21.69198202373745, \"R2\": 0.6634013148524545, \"Memory in Mb\": 0.8368568420410156, \"Time in s\": 13.19172 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 13.874152914620147, \"RMSE\": 22.398641632517744, \"R2\": 0.6824761966895349, \"Memory in Mb\": 0.8399162292480469, \"Time in s\": 14.039444 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 14.02164789427361, \"RMSE\": 22.45496751107478, \"R2\": 0.7024524705388633, \"Memory in Mb\": 0.8451423645019531, \"Time in s\": 14.916422 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 14.881080524519056, \"RMSE\": 23.990711305579165, \"R2\": 0.684297135752792, \"Memory in Mb\": 0.844085693359375, \"Time in s\": 15.821424 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 15.672731015633325, \"RMSE\": 25.84901428202966, \"R2\": 0.6559576619146927, \"Memory in Mb\": 0.8621559143066406, \"Time in s\": 16.75642 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 16.52730145447373, \"RMSE\": 27.03527355015745, \"R2\": 0.6655701177301542, \"Memory in Mb\": 0.8826904296875, \"Time in s\": 17.718062999999997 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 16.835543678126854, \"RMSE\": 27.29532071757395, \"R2\": 0.6832339396752392, \"Memory in Mb\": 0.8854255676269531, \"Time in s\": 18.709096 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 17.2847401584601, \"RMSE\": 27.83347178305896, \"R2\": 0.6846223453976918, \"Memory in Mb\": 0.9156723022460938, \"Time in s\": 19.725565 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 18.263556988874104, \"RMSE\": 29.983264015814683, \"R2\": 0.6562480279042127, \"Memory in Mb\": 0.9476966857910156, \"Time in s\": 20.769031 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 19.168820167599023, \"RMSE\": 31.233753579350584, \"R2\": 0.6706197338145063, \"Memory in Mb\": 0.9631462097167968, \"Time in s\": 21.841163 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 19.66691155441429, \"RMSE\": 31.66292403296529, \"R2\": 0.6800549958221269, \"Memory in Mb\": 
0.9797592163085938, \"Time in s\": 22.942597 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 20.1980005296915, \"RMSE\": 32.326142080110245, \"R2\": 0.6862897411402615, \"Memory in Mb\": 1.003559112548828, \"Time in s\": 24.073219 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 21.038101965066165, \"RMSE\": 33.861783151779306, \"R2\": 0.6657814123530009, \"Memory in Mb\": 1.0402488708496094, \"Time in s\": 25.232436 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 21.79336863548733, \"RMSE\": 34.905834748448235, \"R2\": 0.6648549707242759, \"Memory in Mb\": 1.059162139892578, \"Time in s\": 26.421057 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 22.71415913579816, \"RMSE\": 36.0778798798642, \"R2\": 0.6781016272298894, \"Memory in Mb\": 1.080280303955078, \"Time in s\": 27.640294 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 22.973012146363907, \"RMSE\": 36.13494365478664, \"R2\": 0.690416970144236, \"Memory in Mb\": 1.1105842590332031, \"Time in s\": 28.88878 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 23.822482702164027, \"RMSE\": 37.524990725820025, \"R2\": 0.6764299185798421, \"Memory in Mb\": 1.15643310546875, \"Time in s\": 30.16877 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 24.754282528765707, \"RMSE\": 38.80763200620001, \"R2\": 0.669066082535116, \"Memory in Mb\": 1.1719589233398438, \"Time in s\": 31.481567 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 25.964764492717645, \"RMSE\": 40.6034258569803, \"R2\": 0.6648997528039327, \"Memory in Mb\": 1.186126708984375, \"Time in s\": 32.828648 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.656631863584941, \"RMSE\": 13.301513571178564, \"R2\": -414.0080590272913, \"Memory in Mb\": 0.20159912109375, \"Time in s\": 0.060655 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3092233454389866, \"RMSE\": 9.514642226769206, \"R2\": -35.395716979334736, \"Memory in Mb\": 0.2895278930664062, \"Time in s\": 0.186458 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.370245548222252, \"RMSE\": 7.779742234417967, \"R2\": -31.80504797847069, \"Memory in Mb\": 0.3228263854980469, \"Time in s\": 0.354007 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0011825545667588, \"RMSE\": 6.767036502792006, \"R2\": -23.897224712465302, \"Memory in Mb\": 0.3692893981933594, \"Time in s\": 0.55542 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.056871489932448, \"RMSE\": 6.139015712379544, \"R2\": -11.856454989718417, \"Memory in Mb\": 0.4076881408691406, \"Time in s\": 0.79515 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Adaptive 
Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.003092266983848, \"RMSE\": 5.651046465030358, \"R2\": -8.241693395555247, \"Memory in Mb\": 0.4241790771484375, \"Time in s\": 1.072178 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8854667406427696, \"RMSE\": 5.2532308662388045, \"R2\": -7.252888139218157, \"Memory in Mb\": 0.4439773559570312, \"Time in s\": 1.385482 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9340845560700957, \"RMSE\": 4.984838031470717, \"R2\": -5.553334350331147, \"Memory in Mb\": 0.4557876586914062, \"Time in s\": 1.738613 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9596878280460808, \"RMSE\": 4.757627458420989, \"R2\": -4.242801539132487, \"Memory in Mb\": 0.4726028442382812, \"Time in s\": 2.128929 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9000266951648128, \"RMSE\": 4.53846274922039, \"R2\": -3.785084131964374, \"Memory in Mb\": 0.5018348693847656, \"Time in s\": 2.555084 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7756062902244436, \"RMSE\": 4.331882031903988, \"R2\": -3.702506681895046, \"Memory in Mb\": 0.5390548706054688, \"Time in s\": 3.014926 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.722064335127714, \"RMSE\": 4.161337926832492, \"R2\": -3.3721758647478204, \"Memory in Mb\": 0.5583267211914062, \"Time in s\": 3.510014 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.668741133448935, \"RMSE\": 4.009095884933904, \"R2\": -3.112800247055813, \"Memory in Mb\": 0.5867843627929688, \"Time in s\": 4.039729 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.596023168515899, \"RMSE\": 3.86837254754146, \"R2\": -3.0090634555073343, \"Memory in Mb\": 0.6034698486328125, \"Time in s\": 4.6068750000000005 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5862781822088614, \"RMSE\": 3.757725401796155, \"R2\": -2.72037165832895, \"Memory in Mb\": 0.6344451904296875, \"Time in s\": 5.213001 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5269819355711358, \"RMSE\": 3.644015209511257, \"R2\": -2.67185104068617, \"Memory in Mb\": 0.6524238586425781, \"Time in s\": 5.854158 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4570659608871932, \"RMSE\": 3.537119081966105, \"R2\": -2.6559352799702967, \"Memory in Mb\": 0.6771697998046875, \"Time in s\": 6.534176 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4064005847783465, \"RMSE\": 3.441784227878547, \"R2\": -2.4899419787597266, \"Memory in Mb\": 0.7120590209960938, \"Time in s\": 7.253771 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.353481677960258, \"RMSE\": 
3.352627624678765, \"R2\": -2.426029812278527, \"Memory in Mb\": 0.7612266540527344, \"Time in s\": 8.012458 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3254238940946812, \"RMSE\": 3.27422063813923, \"R2\": -2.360008106665105, \"Memory in Mb\": 0.7831077575683594, \"Time in s\": 8.809453000000001 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2822507927277378, \"RMSE\": 3.197239276537114, \"R2\": -2.309899038967682, \"Memory in Mb\": 0.8153495788574219, \"Time in s\": 9.645719 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2469389910195894, \"RMSE\": 3.127027882921648, \"R2\": -2.108834809107039, \"Memory in Mb\": 0.8549957275390625, \"Time in s\": 10.520540000000002 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.209007072666545, \"RMSE\": 3.060196802253249, \"R2\": -1.8234356170996369, \"Memory in Mb\": 0.8641319274902344, \"Time in s\": 11.437965000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.173565091899577, \"RMSE\": 2.996986036098964, \"R2\": -1.6456994146583188, \"Memory in Mb\": 0.9048995971679688, \"Time in s\": 12.395296000000002 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1545102154576106, \"RMSE\": 2.9412312685847275, \"R2\": -1.494716293761022, \"Memory in Mb\": 0.9429206848144532, \"Time in s\": 13.397268000000002 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1235341152888954, \"RMSE\": 2.885679937630878, \"R2\": -1.4055626483775598, \"Memory in Mb\": 0.9187583923339844, \"Time in s\": 14.455292000000002 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0931325909773704, \"RMSE\": 2.8325152336083046, \"R2\": -1.3092471918321815, \"Memory in Mb\": 0.9785118103027344, \"Time in s\": 15.562633000000002 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.080423433010604, \"RMSE\": 2.7861157835692825, \"R2\": -1.2874470666919131, \"Memory in Mb\": 0.8416099548339844, \"Time in s\": 16.721025 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0538460644054608, \"RMSE\": 2.73873459117484, \"R2\": -1.2649736085810763, \"Memory in Mb\": 0.9269638061523438, \"Time in s\": 17.931443 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.037762850657022, \"RMSE\": 2.6954140775252133, \"R2\": -1.1696179031883165, \"Memory in Mb\": 1.0152244567871094, \"Time in s\": 19.191903 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.022747434499167, \"RMSE\": 2.654976815796094, \"R2\": -1.0729218181826925, \"Memory in Mb\": 0.968769073486328, \"Time in s\": 20.501534 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.006861222675929, \"RMSE\": 2.615186194972241, 
\"R2\": -0.9736586966592728, \"Memory in Mb\": 0.803070068359375, \"Time in s\": 21.860299 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9869414277387362, \"RMSE\": 2.5762284524749504, \"R2\": -0.9015227260176624, \"Memory in Mb\": 0.7759437561035156, \"Time in s\": 23.260671 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.966244541930286, \"RMSE\": 2.538677410892474, \"R2\": -0.8756731719459285, \"Memory in Mb\": 0.8428535461425781, \"Time in s\": 24.702724 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9583039577617944, \"RMSE\": 2.5060806369029174, \"R2\": -0.8758219540617476, \"Memory in Mb\": 0.9465827941894532, \"Time in s\": 26.183847 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9436352676783604, \"RMSE\": 2.472672874668102, \"R2\": -0.8662636043278715, \"Memory in Mb\": 1.0379486083984375, \"Time in s\": 27.712492 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9279533445219876, \"RMSE\": 2.44025252932111, \"R2\": -0.816552178803118, \"Memory in Mb\": 1.1120071411132812, \"Time in s\": 29.286906 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9160723193407864, \"RMSE\": 2.4102496967776017, \"R2\": -0.7913569678512289, \"Memory in Mb\": 1.17376708984375, \"Time in s\": 30.91286 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8969957702252379, \"RMSE\": 2.379401465215729, \"R2\": -0.753616871824967, \"Memory in Mb\": 1.2616004943847656, \"Time in s\": 32.587144 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8879951793456313, \"RMSE\": 2.351379467833174, \"R2\": -0.7280439441517783, \"Memory in Mb\": 1.355243682861328, \"Time in s\": 34.314383 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8778121160020841, \"RMSE\": 2.323761508818731, \"R2\": -0.7062232791684944, \"Memory in Mb\": 1.4321250915527344, \"Time in s\": 36.09551 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8674818777939237, \"RMSE\": 2.297554956673723, \"R2\": -0.683441607427212, \"Memory in Mb\": 1.4874954223632812, \"Time in s\": 37.948423 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8596582290799093, \"RMSE\": 2.2724190258224723, \"R2\": -0.6439714465644337, \"Memory in Mb\": 1.5595359802246094, \"Time in s\": 39.857894 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8554226716886886, \"RMSE\": 2.248810009475745, \"R2\": -0.601989379910626, \"Memory in Mb\": 1.6191024780273438, \"Time in s\": 41.823765 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8440517106498798, \"RMSE\": 2.2244180426447486, \"R2\": -0.5740310312005918, \"Memory in Mb\": 1.1261253356933594, \"Time in 
s\": 43.8642 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8347159483348575, \"RMSE\": 2.201058546919984, \"R2\": -0.5664676720057789, \"Memory in Mb\": 1.1678504943847656, \"Time in s\": 45.946931000000006 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8262269751007542, \"RMSE\": 2.1790511985851078, \"R2\": -0.5483240613297584, \"Memory in Mb\": 1.119964599609375, \"Time in s\": 48.07255500000001 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8159800926650994, \"RMSE\": 2.1571455676024542, \"R2\": -0.5332153251602936, \"Memory in Mb\": 1.1733894348144531, \"Time in s\": 50.238479000000005 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.80675324923832, \"RMSE\": 2.1360469188887925, \"R2\": -0.5325676025464574, \"Memory in Mb\": 1.2283477783203125, \"Time in s\": 52.4464 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.801132918984696, \"RMSE\": 2.116027645482028, \"R2\": -0.5292923269414216, \"Memory in Mb\": 1.2836189270019531, \"Time in s\": 54.694244000000005 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.664574314574316, \"RMSE\": 12.7079745317607, \"R2\": -206.87879383707747, \"Memory in Mb\": 0.0196142196655273, \"Time in s\": 0.002799 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.767694704637076, \"RMSE\": 9.018587183866767, \"R2\": -85.14025986830408, \"Memory in Mb\": 0.0211782455444335, \"Time in s\": 0.009348 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.3093367298127023, \"RMSE\": 7.420500566500976, \"R2\": -37.24267181629702, \"Memory in Mb\": 0.0263471603393554, \"Time in s\": 0.018276 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 1.892363968348808, \"RMSE\": 6.441521936619904, \"R2\": -31.668094594906044, \"Memory in Mb\": 0.0274343490600585, \"Time in s\": 0.0297909999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.1129412159858934, \"RMSE\": 6.114058653243701, \"R2\": -6.297346571779499, \"Memory in Mb\": 0.0340337753295898, \"Time in s\": 0.044022 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.832849782567835, \"RMSE\": 6.236602142425367, \"R2\": -2.2730130120415795, \"Memory in Mb\": 0.043257713317871, \"Time in s\": 0.061336 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4069290990236856, \"RMSE\": 6.402381882180361, \"R2\": -1.3118663438824, \"Memory in Mb\": 0.0494871139526367, \"Time in s\": 0.082289 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 3.650377971160808, \"RMSE\": 6.321189272940957, \"R2\": -1.043267371916866, \"Memory in Mb\": 0.0551328659057617, \"Time in s\": 0.107232 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Adaptive 
Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.035631404360372, \"RMSE\": 6.4483291916176695, \"R2\": -0.7783857772357967, \"Memory in Mb\": 0.0562467575073242, \"Time in s\": 0.136317 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.693189868957898, \"RMSE\": 7.0697740144659305, \"R2\": -0.4927792786841307, \"Memory in Mb\": 0.0576238632202148, \"Time in s\": 0.169599 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.274396860168236, \"RMSE\": 7.6542276724395, \"R2\": -0.3476225254437259, \"Memory in Mb\": 0.0577573776245117, \"Time in s\": 0.206842 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.247611065864998, \"RMSE\": 7.56430675955835, \"R2\": -0.0756815066101803, \"Memory in Mb\": 0.0578107833862304, \"Time in s\": 0.2481519999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.084413296044263, \"RMSE\": 7.343803904848652, \"R2\": 0.1787885014844915, \"Memory in Mb\": 0.058394432067871, \"Time in s\": 0.2937619999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.973008915037768, \"RMSE\": 7.173430375731751, \"R2\": 0.3340904988080935, \"Memory in Mb\": 0.0584478378295898, \"Time in s\": 0.343499 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.201973475639973, \"RMSE\": 7.389818367745889, \"R2\": 0.4319135436678196, \"Memory in Mb\": 0.0584478378295898, \"Time in s\": 0.397279 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.377897753885034, \"RMSE\": 7.538080975572278, \"R2\": 0.5233859928595415, \"Memory in Mb\": 0.0590581893920898, \"Time in s\": 0.455134 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.414777515271245, \"RMSE\": 7.541781669769663, \"R2\": 0.6093812059493195, \"Memory in Mb\": 0.0591115951538085, \"Time in s\": 0.517245 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.40059238519783, \"RMSE\": 7.511878220104288, \"R2\": 0.6917410630009373, \"Memory in Mb\": 0.0590581893920898, \"Time in s\": 0.583523 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.933708937482518, \"RMSE\": 9.717098931216649, \"R2\": 0.5747798982216288, \"Memory in Mb\": 0.0252752304077148, \"Time in s\": 0.662886 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 6.498767742677896, \"RMSE\": 10.515698120348512, \"R2\": 0.5640139167754625, \"Memory in Mb\": 0.0314245223999023, \"Time in s\": 0.744596 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 6.70504504336628, \"RMSE\": 10.67374680573752, \"R2\": 0.6157790851383267, \"Memory in Mb\": 0.0365400314331054, \"Time in s\": 0.828906 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.118759598962231, \"RMSE\": 11.248237924166032, \"R2\": 
0.6566609789141779, \"Memory in Mb\": 0.0410718917846679, \"Time in s\": 0.916125 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.339750662382254, \"RMSE\": 11.39871112384624, \"R2\": 0.6937739359440155, \"Memory in Mb\": 0.0445966720581054, \"Time in s\": 1.006337 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.866457552558359, \"RMSE\": 12.301057885719082, \"R2\": 0.6741619364384577, \"Memory in Mb\": 0.0447034835815429, \"Time in s\": 1.099545 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 8.421243223038738, \"RMSE\": 13.285884557144795, \"R2\": 0.6655331879845522, \"Memory in Mb\": 0.0447034835815429, \"Time in s\": 1.19574 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 8.956033363560122, \"RMSE\": 14.109625220244896, \"R2\": 0.6732762786849746, \"Memory in Mb\": 0.0452070236206054, \"Time in s\": 1.295357 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 9.573413802719209, \"RMSE\": 14.887232530340055, \"R2\": 0.6845415314559343, \"Memory in Mb\": 0.0452337265014648, \"Time in s\": 1.398184 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 10.140823162094344, \"RMSE\": 15.798657858475249, \"R2\": 0.6835926538539661, \"Memory in Mb\": 0.0452604293823242, \"Time in s\": 1.504001 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.05646041165176, \"RMSE\": 17.826108509419473, \"R2\": 0.6344480842540857, \"Memory in Mb\": 0.0452604293823242, \"Time in s\": 1.6128270000000002 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.706749123156325, \"RMSE\": 18.647901518188576, \"R2\": 0.6576594092850414, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.7247210000000002 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.849547188265053, \"RMSE\": 18.683751607733637, \"R2\": 0.6922705096711406, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.839831 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.96088648820193, \"RMSE\": 18.74329807265456, \"R2\": 0.7184745225672177, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.958081 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 12.783089048199372, \"RMSE\": 19.95838853158221, \"R2\": 0.6900311672733117, \"Memory in Mb\": 0.0453138351440429, \"Time in s\": 2.0793790000000003 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.27307991721093, \"RMSE\": 20.988857849066505, \"R2\": 0.6848686892374445, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.203704 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.623649100869688, \"RMSE\": 21.545378780740656, \"R2\": 0.7062071700264252, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.331151 }, { \"step\": 396, 
\"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.714864044781413, \"RMSE\": 21.4916185882578, \"R2\": 0.7274352207736796, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.4617500000000003 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 14.57407318940339, \"RMSE\": 22.90334645043852, \"R2\": 0.712266679293069, \"Memory in Mb\": 0.0456800460815429, \"Time in s\": 2.5991990000000005 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.311297276648313, \"RMSE\": 24.25392212062312, \"R2\": 0.6971079497894322, \"Memory in Mb\": 0.0457334518432617, \"Time in s\": 2.7396560000000005 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.833945440380871, \"RMSE\": 25.12811892959106, \"R2\": 0.7110893860103431, \"Memory in Mb\": 0.0457334518432617, \"Time in s\": 2.8832220000000004 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.995632485589844, \"RMSE\": 25.20571130808328, \"R2\": 0.7298778762133054, \"Memory in Mb\": 0.0456533432006835, \"Time in s\": 3.0299210000000003 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 16.482571154231422, \"RMSE\": 25.77399383544894, \"R2\": 0.7295670550023294, \"Memory in Mb\": 0.0461835861206054, \"Time in s\": 3.179584 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 17.556958821758087, \"RMSE\": 27.82207110996992, \"R2\": 0.7040173234911381, \"Memory in Mb\": 0.0469274520874023, \"Time in s\": 3.3322830000000003 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 18.31809908164516, \"RMSE\": 29.103026344234387, \"R2\": 0.7140266770057507, \"Memory in Mb\": 0.046980857849121, \"Time in s\": 3.488055 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 18.645508344467764, \"RMSE\": 29.39095020592674, \"R2\": 0.7243229903014706, \"Memory in Mb\": 0.0469541549682617, \"Time in s\": 3.646969 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 19.076944683969508, \"RMSE\": 29.900823849283483, \"R2\": 0.7315970559213162, \"Memory in Mb\": 0.0469007492065429, \"Time in s\": 3.808963 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 19.9412049113122, \"RMSE\": 31.299098765867257, \"R2\": 0.7144549629170223, \"Memory in Mb\": 0.046980857849121, \"Time in s\": 3.97409 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 20.652539482762663, \"RMSE\": 32.28122969713156, \"R2\": 0.7133599532452177, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.142383000000001 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 21.437431132207102, \"RMSE\": 33.471760575953454, \"R2\": 0.7229272102715563, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.313758000000001 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Adaptive Model 
Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 21.589008276225865, \"RMSE\": 33.459905509370415, \"R2\": 0.7345566785536845, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.488246000000001 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 22.551700866868885, \"RMSE\": 35.03737693702089, \"R2\": 0.717908278090669, \"Memory in Mb\": 0.0470342636108398, \"Time in s\": 4.665796000000001 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 23.243872726229487, \"RMSE\": 35.949191367533466, \"R2\": 0.7160216409608307, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.846464000000001 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 24.092513885911806, \"RMSE\": 37.13693189688246, \"R2\": 0.7196752558485364, \"Memory in Mb\": 0.0469541549682617, \"Time in s\": 5.030284000000001 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695184981652336, \"RMSE\": 9.807184976514188, \"R2\": -224.6021011118197, \"Memory in Mb\": 0.0538091659545898, \"Time in s\": 0.005953 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3994713447037435, \"RMSE\": 7.102066178895935, \"R2\": -19.27845129783118, \"Memory in Mb\": 0.0761518478393554, \"Time in s\": 0.016156 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8170744682035584, \"RMSE\": 5.815253847056423, \"R2\": -17.329373299766118, \"Memory in Mb\": 0.0883970260620117, \"Time in s\": 0.0302489999999999 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.604995404573344, \"RMSE\": 5.081770494168446, \"R2\": -13.040545957103586, \"Memory in Mb\": 0.0980443954467773, \"Time in s\": 0.0484799999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824259078948539, \"RMSE\": 4.70488333223354, \"R2\": -6.5512954222403845, \"Memory in Mb\": 0.1071348190307617, \"Time in s\": 0.071134 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918744608116588, \"RMSE\": 4.412336880489357, \"R2\": -4.634185300646759, \"Memory in Mb\": 0.1113233566284179, \"Time in s\": 0.098322 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8761207739327503, \"RMSE\": 4.13187920011476, \"R2\": -4.105616799680584, \"Memory in Mb\": 0.1133375167846679, \"Time in s\": 0.13009 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.961232939518506, \"RMSE\": 3.976173487274506, \"R2\": -3.1695661963674864, \"Memory in Mb\": 0.1174459457397461, \"Time in s\": 0.166507 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.066134597500757, \"RMSE\": 3.873731518767916, \"R2\": -2.4756944369169624, \"Memory in Mb\": 0.1194601058959961, \"Time in s\": 0.207686 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051125997923389, 
\"RMSE\": 3.731810291394655, \"R2\": -2.23527456693896, \"Memory in Mb\": 0.0176219940185546, \"Time in s\": 0.262943 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0738811328897206, \"RMSE\": 4.417664564856108, \"R2\": -3.890594467356201, \"Memory in Mb\": 0.0358037948608398, \"Time in s\": 0.32065 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9726100065438288, \"RMSE\": 4.237524240975239, \"R2\": -3.5337340888030546, \"Memory in Mb\": 0.0415029525756835, \"Time in s\": 0.38109 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8594315384151243, \"RMSE\": 4.074751007989252, \"R2\": -3.248610147038553, \"Memory in Mb\": 0.0488462448120117, \"Time in s\": 0.444412 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7773205119132678, \"RMSE\": 3.936654153117972, \"R2\": -3.1518424972300867, \"Memory in Mb\": 0.0637922286987304, \"Time in s\": 0.510957 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8265705896173516, \"RMSE\": 3.8591002097544127, \"R2\": -2.923813511442849, \"Memory in Mb\": 0.0735006332397461, \"Time in s\": 0.581026 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7437620649419607, \"RMSE\": 3.7394874649640353, \"R2\": -2.8667745903740336, \"Memory in Mb\": 0.0810804367065429, \"Time in s\": 0.6546050000000001 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7029951846067328, \"RMSE\": 3.640753753244776, \"R2\": -2.873305462857122, \"Memory in Mb\": 0.0861959457397461, \"Time in s\": 0.7321480000000001 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.691125588823449, \"RMSE\": 3.557868621003357, \"R2\": -2.729329365262769, \"Memory in Mb\": 0.0937833786010742, \"Time in s\": 0.8137190000000001 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.641476039217788, \"RMSE\": 3.4678199943963026, \"R2\": -2.665503107324644, \"Memory in Mb\": 0.0988988876342773, \"Time in s\": 0.8994270000000001 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6260112424669562, \"RMSE\": 3.3952504336469187, \"R2\": -2.613000890937967, \"Memory in Mb\": 0.1061162948608398, \"Time in s\": 0.989373 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6289201270983786, \"RMSE\": 3.3343146523246907, \"R2\": -2.599793842225358, \"Memory in Mb\": 0.1101179122924804, \"Time in s\": 1.08339 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.667060123646852, \"RMSE\": 3.302206999442347, \"R2\": -2.466911261860751, \"Memory in Mb\": 0.1157636642456054, \"Time in s\": 1.1816330000000002 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.69667104754334, \"RMSE\": 3.2720484626443, \"R2\": -2.2278892819413008, \"Memory in Mb\": 
0.1238470077514648, \"Time in s\": 1.2846220000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6506098779434175, \"RMSE\": 3.2067821053781245, \"R2\": -2.029074572324324, \"Memory in Mb\": 0.1315069198608398, \"Time in s\": 1.3924660000000002 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6365240614669594, \"RMSE\": 3.1603547309397078, \"R2\": -1.8802784791951508, \"Memory in Mb\": 0.0784368515014648, \"Time in s\": 1.505205 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6536721067944389, \"RMSE\": 3.126560253923372, \"R2\": -1.823930193625598, \"Memory in Mb\": 0.0835790634155273, \"Time in s\": 1.621512 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6698160029512246, \"RMSE\": 3.0946441969309766, \"R2\": -1.7564325082786318, \"Memory in Mb\": 0.0873861312866211, \"Time in s\": 1.745676 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6408841434358417, \"RMSE\": 3.046586581366264, \"R2\": -1.735141389893172, \"Memory in Mb\": 0.0885534286499023, \"Time in s\": 1.8735 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6127327645791831, \"RMSE\": 2.999611374258061, \"R2\": -1.7170225021123482, \"Memory in Mb\": 0.0890569686889648, \"Time in s\": 2.004994 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6269498006919805, \"RMSE\": 2.973082395326553, \"R2\": -1.6396488808638732, \"Memory in Mb\": 0.0909147262573242, \"Time in s\": 2.140229 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.64112955570108, \"RMSE\": 2.949075530135499, \"R2\": -1.5576036781852802, \"Memory in Mb\": 0.0923452377319336, \"Time in s\": 2.27922 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6562657927450175, \"RMSE\": 2.9273758724267736, \"R2\": -1.4729982020585646, \"Memory in Mb\": 0.0944395065307617, \"Time in s\": 2.422009 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6610090165740414, \"RMSE\": 2.900076441293188, \"R2\": -1.409637238697782, \"Memory in Mb\": 0.0960302352905273, \"Time in s\": 2.568661 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.640070345532056, \"RMSE\": 2.8623424740678667, \"R2\": -1.3844340745604549, \"Memory in Mb\": 0.0981245040893554, \"Time in s\": 2.7190639999999995 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6119603204138224, \"RMSE\": 2.8240252200668348, \"R2\": -1.381983091116742, \"Memory in Mb\": 0.1015691757202148, \"Time in s\": 2.8733429999999998 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.589173412563986, \"RMSE\": 2.788316481605285, \"R2\": -1.3731423466582644, \"Memory in Mb\": 0.1027364730834961, \"Time in s\": 3.031541 }, { \"step\": 740, \"track\": 
\"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5872474989945902, \"RMSE\": 2.762320631839069, \"R2\": -1.3276973292362433, \"Memory in Mb\": 0.1038503646850586, \"Time in s\": 3.198810999999999 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.573860293324891, \"RMSE\": 2.731605449949154, \"R2\": -1.3008801881813965, \"Memory in Mb\": 0.1038503646850586, \"Time in s\": 3.3700799999999997 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5672492734296428, \"RMSE\": 2.7047187411026274, \"R2\": -1.2659143323804294, \"Memory in Mb\": 0.1064214706420898, \"Time in s\": 3.5453349999999992 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5527653312522924, \"RMSE\": 2.676901954415756, \"R2\": -1.2396196471003753, \"Memory in Mb\": 0.1070318222045898, \"Time in s\": 3.7247 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5366430278321572, \"RMSE\": 2.648967131435787, \"R2\": -1.2172052322327516, \"Memory in Mb\": 0.1085958480834961, \"Time in s\": 3.908095 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5234128351461855, \"RMSE\": 2.622526466573217, \"R2\": -1.1933402061449063, \"Memory in Mb\": 0.1101598739624023, \"Time in s\": 4.095543999999999 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.520997799745444, \"RMSE\": 2.601206078568585, \"R2\": -1.1541054380753062, \"Memory in Mb\": 0.1107702255249023, \"Time in s\": 4.287108999999999 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4988763963538276, \"RMSE\": 2.573388458013685, \"R2\": -1.0978034673380694, \"Memory in Mb\": 0.1112470626831054, \"Time in s\": 4.482816999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4758663089418147, \"RMSE\": 2.54594713286987, \"R2\": -1.061955243276925, \"Memory in Mb\": 0.1116437911987304, \"Time in s\": 4.682644999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.462937696730923, \"RMSE\": 2.523979038782575, \"R2\": -1.059822201667401, \"Memory in Mb\": 0.1116704940795898, \"Time in s\": 4.886850999999999 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4561394845136584, \"RMSE\": 2.50519739840106, \"R2\": -1.0464959899330828, \"Memory in Mb\": 0.1127042770385742, \"Time in s\": 5.100838999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4393535919618172, \"RMSE\": 2.483254475687026, \"R2\": -1.0318268701719249, \"Memory in Mb\": 0.1132078170776367, \"Time in s\": 5.318975999999998 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4209594300543067, \"RMSE\": 2.4596960058574417, \"R2\": -1.0321742156649796, \"Memory in Mb\": 0.1138181686401367, \"Time in s\": 5.541285999999999 }, { \"step\": 1000, \"track\": 
\"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4020445673510784, \"RMSE\": 2.4364355463770164, \"R2\": -1.027485181852556, \"Memory in Mb\": 0.1144285202026367, \"Time in s\": 5.767785999999998 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.674710287324511, \"RMSE\": 12.709622005759083, \"R2\": -206.93269654300337, \"Memory in Mb\": 0.1438665390014648, \"Time in s\": 0.043578 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.741934273684416, \"RMSE\": 9.017856101646904, \"R2\": -85.12629469646626, \"Memory in Mb\": 0.1680784225463867, \"Time in s\": 0.114891 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.321314094852809, \"RMSE\": 7.424021720293775, \"R2\": -37.27897402435965, \"Memory in Mb\": 0.2096052169799804, \"Time in s\": 0.217395 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 1.9425031371298072, \"RMSE\": 6.446443185481759, \"R2\": -31.71802976156788, \"Memory in Mb\": 0.2417478561401367, \"Time in s\": 0.352059 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.220127898780405, \"RMSE\": 6.120501061993398, \"R2\": -6.312733162160137, \"Memory in Mb\": 0.3060827255249023, \"Time in s\": 0.516621 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.329752126186388, \"RMSE\": 5.733717860182345, \"R2\": -1.7664593315707076, \"Memory in Mb\": 0.3567266464233398, \"Time in s\": 0.719974 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.702798931003846, \"RMSE\": 5.8295610878248265, \"R2\": -0.9166874006339528, \"Memory in Mb\": 0.3732900619506836, \"Time in s\": 0.96062 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.6099619817757915, \"RMSE\": 5.526618942218035, \"R2\": -0.5618763668879856, \"Memory in Mb\": 0.4128637313842773, \"Time in s\": 1.239929 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.746820956366501, \"RMSE\": 5.433915350818854, \"R2\": -0.2628661224764999, \"Memory in Mb\": 0.4623746871948242, \"Time in s\": 1.563224 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.8880448046427323, \"RMSE\": 5.393209741308231, \"R2\": 0.131281330475993, \"Memory in Mb\": 0.5318593978881836, \"Time in s\": 1.933351 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.102793618966865, \"RMSE\": 5.4667968998241765, \"R2\": 0.312565398150165, \"Memory in Mb\": 0.5604543685913086, \"Time in s\": 2.356966 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.301393875206229, \"RMSE\": 5.777687225912497, \"R2\": 0.3724425463943938, \"Memory in Mb\": 0.1946859359741211, \"Time in s\": 2.834547 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", 
\"MAE\": 3.2818761969220303, \"RMSE\": 5.651296511520541, \"R2\": 0.5136946280960023, \"Memory in Mb\": 0.2288389205932617, \"Time in s\": 3.343438 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.2515588511167266, \"RMSE\": 5.538190901897615, \"R2\": 0.6030852117170243, \"Memory in Mb\": 0.2577199935913086, \"Time in s\": 3.884102 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.5490619378471253, \"RMSE\": 5.901472315915889, \"R2\": 0.6377005660204649, \"Memory in Mb\": 0.2627325057983398, \"Time in s\": 4.460716 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.748099204740288, \"RMSE\": 6.114989482363781, \"R2\": 0.6863562530103127, \"Memory in Mb\": 0.2941198348999023, \"Time in s\": 5.073099 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.855321751115436, \"RMSE\": 6.172883744230479, \"R2\": 0.7383134393857906, \"Memory in Mb\": 0.3320951461791992, \"Time in s\": 5.716163 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.064611732985858, \"RMSE\": 6.634154210053631, \"R2\": 0.7595694090278171, \"Memory in Mb\": 0.2527418136596679, \"Time in s\": 6.400463 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.2854283677061895, \"RMSE\": 7.034301545827039, \"R2\": 0.7771654635449772, \"Memory in Mb\": 0.3202161788940429, \"Time in s\": 7.116785 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.87812017832838, \"RMSE\": 8.464168223538273, \"R2\": 0.7175347810295998, \"Memory in Mb\": 0.3510808944702148, \"Time in s\": 7.873189 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.139510271881338, \"RMSE\": 8.709209118107985, \"R2\": 0.7441975817879227, \"Memory in Mb\": 0.3200139999389648, \"Time in s\": 8.671972 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.512568989147682, \"RMSE\": 9.214318878962654, \"R2\": 0.7696009669773758, \"Memory in Mb\": 0.3710927963256836, \"Time in s\": 9.508629 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.61462831814645, \"RMSE\": 9.300369065456374, \"R2\": 0.7961404678287027, \"Memory in Mb\": 0.4192609786987304, \"Time in s\": 10.381303 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 6.307820522062941, \"RMSE\": 10.632794713133398, \"R2\": 0.7565488951511344, \"Memory in Mb\": 0.3416013717651367, \"Time in s\": 11.288641 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 6.788015101176612, \"RMSE\": 11.834244259749068, \"R2\": 0.7346291964126817, \"Memory in Mb\": 0.3569021224975586, \"Time in s\": 12.231102 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.083325494446596, \"RMSE\": 12.279756566760542, \"R2\": 0.7525262008661281, 
\"Memory in Mb\": 0.3965520858764648, \"Time in s\": 13.20812 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.191320030958258, \"RMSE\": 12.343414248948324, \"R2\": 0.7831373025283351, \"Memory in Mb\": 0.4258260726928711, \"Time in s\": 14.218151 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.797209174725968, \"RMSE\": 13.278742843330225, \"R2\": 0.7764780945249113, \"Memory in Mb\": 0.4501142501831054, \"Time in s\": 15.260696 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 8.817827198046418, \"RMSE\": 15.9696940597815, \"R2\": 0.7066209046531168, \"Memory in Mb\": 0.4718656539916992, \"Time in s\": 16.339578 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.357743221083844, \"RMSE\": 16.830542617885925, \"R2\": 0.7211345584533546, \"Memory in Mb\": 0.3236379623413086, \"Time in s\": 17.459623999999998 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.597227793355117, \"RMSE\": 16.978620563243194, \"R2\": 0.7458759585671038, \"Memory in Mb\": 0.3676939010620117, \"Time in s\": 18.612921 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.7373515968392, \"RMSE\": 17.046818432657442, \"R2\": 0.7671306392320572, \"Memory in Mb\": 0.3629522323608398, \"Time in s\": 19.795535999999995 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 10.474784553852622, \"RMSE\": 18.420082712462506, \"R2\": 0.7359718490826586, \"Memory in Mb\": 0.3571195602416992, \"Time in s\": 21.012742 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.136965297216438, \"RMSE\": 20.001694167276444, \"R2\": 0.7138145773791849, \"Memory in Mb\": 0.3998785018920898, \"Time in s\": 22.263945 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.663755213802826, \"RMSE\": 20.72310945995512, \"R2\": 0.7282041838356077, \"Memory in Mb\": 0.3288450241088867, \"Time in s\": 23.553922 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.794892571100942, \"RMSE\": 20.71536814309967, \"R2\": 0.7467690419180344, \"Memory in Mb\": 0.4024953842163086, \"Time in s\": 24.874665 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 12.7361976076631, \"RMSE\": 22.396840643020628, \"R2\": 0.7248523596518419, \"Memory in Mb\": 0.4475545883178711, \"Time in s\": 26.248019 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 13.669165628181592, \"RMSE\": 24.17119440887557, \"R2\": 0.6991706950778176, \"Memory in Mb\": 0.4968290328979492, \"Time in s\": 27.657996 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.21091951782044, \"RMSE\": 24.907688885339496, \"R2\": 0.7161359437094021, \"Memory in Mb\": 0.5376424789428711, \"Time in s\": 
29.100111 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.453337266265764, \"RMSE\": 25.002813963201465, \"R2\": 0.7342091543116155, \"Memory in Mb\": 0.5657072067260742, \"Time in s\": 30.577019 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.795462221424788, \"RMSE\": 25.396591749368834, \"R2\": 0.7374288341656263, \"Memory in Mb\": 0.2583265304565429, \"Time in s\": 32.092868 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 16.121556592117344, \"RMSE\": 28.147488646444906, \"R2\": 0.6970529793362255, \"Memory in Mb\": 0.3047628402709961, \"Time in s\": 33.634783000000006 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 16.919755395139642, \"RMSE\": 29.31210430009691, \"R2\": 0.7099030173412568, \"Memory in Mb\": 0.3544912338256836, \"Time in s\": 35.20760200000001 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 17.222944705236912, \"RMSE\": 29.519121223412064, \"R2\": 0.7219133474565596, \"Memory in Mb\": 0.3983259201049804, \"Time in s\": 36.808400000000006 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 17.875632407347712, \"RMSE\": 30.34841895302658, \"R2\": 0.7235012910446976, \"Memory in Mb\": 0.4324254989624023, \"Time in s\": 38.43633400000001 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 18.888217233083253, \"RMSE\": 31.835585198521954, \"R2\": 0.7045822227904736, \"Memory in Mb\": 0.4678411483764648, \"Time in s\": 40.09290200000001 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 19.824673890348247, \"RMSE\": 33.232962754926376, \"R2\": 0.6962090391160322, \"Memory in Mb\": 0.4975500106811523, \"Time in s\": 41.78457100000001 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 20.55396214529221, \"RMSE\": 34.09704280994323, \"R2\": 0.712478586733055, \"Memory in Mb\": 0.5351285934448242, \"Time in s\": 43.50851800000001 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 20.794926737672743, \"RMSE\": 34.209729391662115, \"R2\": 0.7225264054992676, \"Memory in Mb\": 0.576685905456543, \"Time in s\": 45.26814300000001 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 21.840113545070352, \"RMSE\": 36.13121679398973, \"R2\": 0.7000199686285788, \"Memory in Mb\": 0.4753904342651367, \"Time in s\": 47.07007900000001 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 22.59682566601873, \"RMSE\": 37.08324430393829, \"R2\": 0.6978222813826447, \"Memory in Mb\": 0.5189352035522461, \"Time in s\": 48.90757000000001 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 23.516206925607506, \"RMSE\": 38.207190389030345, \"R2\": 0.703284935102584, \"Memory in Mb\": 0.5585355758666992, 
\"Time in s\": 50.782863000000006 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.677140920600926, \"RMSE\": 9.804891856735376, \"R2\": -224.4966127051096, \"Memory in Mb\": 0.2373647689819336, \"Time in s\": 0.10297 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.227777813220112, \"RMSE\": 7.083306817310631, \"R2\": -19.171465983096805, \"Memory in Mb\": 0.3270711898803711, \"Time in s\": 0.289755 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.615301860635012, \"RMSE\": 5.7908261762165685, \"R2\": -17.175707266102673, \"Memory in Mb\": 0.3493108749389648, \"Time in s\": 0.561259 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3541232617236425, \"RMSE\": 5.026566774725167, \"R2\": -12.73715546617699, \"Memory in Mb\": 0.3968191146850586, \"Time in s\": 0.918423 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2177336486572592, \"RMSE\": 4.516233376106315, \"R2\": -5.957872973758095, \"Memory in Mb\": 0.4107885360717773, \"Time in s\": 1.362708 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1154353306650455, \"RMSE\": 4.135716354504269, \"R2\": -3.949887064372274, \"Memory in Mb\": 0.4562673568725586, \"Time in s\": 1.881933 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2694693870086102, \"RMSE\": 4.857381406328817, \"R2\": -6.05598089151511, \"Memory in Mb\": 0.2026891708374023, \"Time in s\": 2.479905 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.197729670480587, \"RMSE\": 4.553187715474859, \"R2\": -4.467532002416268, \"Memory in Mb\": 0.3215742111206054, \"Time in s\": 3.138645 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1382354235034091, \"RMSE\": 4.302111654125532, \"R2\": -3.2869252353070264, \"Memory in Mb\": 0.4017667770385742, \"Time in s\": 3.869636 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0619299314905082, \"RMSE\": 4.083716699099621, \"R2\": -2.874210730330613, \"Memory in Mb\": 0.4772901535034179, \"Time in s\": 4.674132999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.009898171671029, \"RMSE\": 3.898715068147509, \"R2\": -2.8090719143731837, \"Memory in Mb\": 0.5200605392456055, \"Time in s\": 5.552957999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9646445055554522, \"RMSE\": 3.736185930434801, \"R2\": -2.5244277699140394, \"Memory in Mb\": 0.590418815612793, \"Time in s\": 6.501358999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9172332240820484, \"RMSE\": 3.59133014967386, \"R2\": -2.300314999970906, \"Memory in Mb\": 0.677699089050293, \"Time in s\": 7.522147999999999 }, { 
\"step\": 280, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8713372213927624, \"RMSE\": 3.461793231901636, \"R2\": -2.2106176891844487, \"Memory in Mb\": 0.7459287643432617, \"Time in s\": 8.615362 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8589405406556339, \"RMSE\": 3.350216865329824, \"R2\": -1.9572094631572516, \"Memory in Mb\": 0.8300580978393555, \"Time in s\": 9.791068 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8332253596869799, \"RMSE\": 3.2462964384589745, \"R2\": -1.91407713381932, \"Memory in Mb\": 0.8134641647338867, \"Time in s\": 11.051061 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8452804825423809, \"RMSE\": 3.1781241500612394, \"R2\": -1.9514868559348704, \"Memory in Mb\": 0.7938528060913086, \"Time in s\": 12.398121 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8633895166787324, \"RMSE\": 3.1040171399305283, \"R2\": -1.8385669899509445, \"Memory in Mb\": 0.8660383224487305, \"Time in s\": 13.839894 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8513159306308952, \"RMSE\": 3.0258511220091995, \"R2\": -1.790715802132521, \"Memory in Mb\": 0.930495262145996, \"Time in s\": 15.372487 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8510161564157176, \"RMSE\": 2.9641796059686003, \"R2\": -1.7538068241726803, \"Memory in Mb\": 0.9046812057495116, \"Time in s\": 17.004842 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8518447067444684, \"RMSE\": 2.91289276041272, \"R2\": -1.747346648183163, \"Memory in Mb\": 0.2984609603881836, \"Time in s\": 18.737382 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8395925714721912, \"RMSE\": 2.8496340177457244, \"R2\": -1.5817388594042834, \"Memory in Mb\": 0.3453207015991211, \"Time in s\": 20.534513 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8276812048623222, \"RMSE\": 2.790693875069675, \"R2\": -1.3480296319058032, \"Memory in Mb\": 0.3807516098022461, \"Time in s\": 22.405168 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8089244758990292, \"RMSE\": 2.7333778802045163, \"R2\": -1.2007484949677454, \"Memory in Mb\": 0.4482488632202148, \"Time in s\": 24.346508 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7928829832001318, \"RMSE\": 2.679994202833863, \"R2\": -1.0712404739849797, \"Memory in Mb\": 0.4975194931030273, \"Time in s\": 26.364916 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7968627360462042, \"RMSE\": 2.655906961990266, \"R2\": -1.037727311679443, \"Memory in Mb\": 0.3753881454467773, \"Time in s\": 28.465481 }, { \"step\": 540, \"track\": \"Regression\", 
\"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7742648164612971, \"RMSE\": 2.606576025481956, \"R2\": -0.9555400217068246, \"Memory in Mb\": 0.4063673019409179, \"Time in s\": 30.635854 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7686196547282114, \"RMSE\": 2.5632583130611786, \"R2\": -0.9361432250468024, \"Memory in Mb\": 0.4374494552612304, \"Time in s\": 32.873421 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7604339230100455, \"RMSE\": 2.522208925126442, \"R2\": -0.9209911676265596, \"Memory in Mb\": 0.4967718124389648, \"Time in s\": 35.174013 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7505205215384793, \"RMSE\": 2.4816305596178174, \"R2\": -0.839105014250833, \"Memory in Mb\": 0.569575309753418, \"Time in s\": 37.541938 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7415649894603582, \"RMSE\": 2.4434454055931187, \"R2\": -0.7557664113175795, \"Memory in Mb\": 0.6584272384643555, \"Time in s\": 39.976025 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7318408439896782, \"RMSE\": 2.406886195682516, \"R2\": -0.6717754169622376, \"Memory in Mb\": 0.7120962142944336, \"Time in s\": 42.479937 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7165688958291918, \"RMSE\": 2.37061113672817, \"R2\": -0.6101020988109156, \"Memory in Mb\": 0.8089780807495117, \"Time in s\": 45.058017 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7019759043195819, \"RMSE\": 2.336022363731008, \"R2\": -0.5881668313132071, \"Memory in Mb\": 0.8398160934448242, \"Time in s\": 47.720249 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7046161229979929, \"RMSE\": 2.307765546298159, \"R2\": -0.5906876272832315, \"Memory in Mb\": 0.9275884628295898, \"Time in s\": 50.471651 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.699791115281057, \"RMSE\": 2.2802792890719745, \"R2\": -0.5871418479715558, \"Memory in Mb\": 0.9087285995483398, \"Time in s\": 53.310919 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6993154625867034, \"RMSE\": 2.2532668970074776, \"R2\": -0.5488294820654547, \"Memory in Mb\": 0.8719320297241211, \"Time in s\": 56.236366 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6904813287783395, \"RMSE\": 2.224703478329614, \"R2\": -0.5261679577762028, \"Memory in Mb\": 0.929518699645996, \"Time in s\": 59.248631 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6809571474431378, \"RMSE\": 2.1968956754751456, \"R2\": -0.4949206250779803, \"Memory in Mb\": 1.0446271896362305, \"Time in s\": 62.34821899999999 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Streaming Random 
Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6801944660295238, \"RMSE\": 2.1726014141558028, \"R2\": -0.4752630155166446, \"Memory in Mb\": 1.1031560897827148, \"Time in s\": 65.54039399999999 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6800251543540311, \"RMSE\": 2.1538267014914845, \"R2\": -0.4657984869085023, \"Memory in Mb\": 1.0328702926635742, \"Time in s\": 68.82165799999999 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6756860923792902, \"RMSE\": 2.1342936501586, \"R2\": -0.4526954783446735, \"Memory in Mb\": 0.7475957870483398, \"Time in s\": 72.18837399999998 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6733560632024954, \"RMSE\": 2.111426905397114, \"R2\": -0.4192847599850244, \"Memory in Mb\": 0.8119535446166992, \"Time in s\": 75.62699399999998 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6699617254268444, \"RMSE\": 2.0889651464935617, \"R2\": -0.3823451381863831, \"Memory in Mb\": 0.8655576705932617, \"Time in s\": 79.14156799999998 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6641763033522209, \"RMSE\": 2.070781028281848, \"R2\": -0.3641082228169974, \"Memory in Mb\": 0.8467855453491211, \"Time in s\": 82.73512999999998 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6581948551182502, \"RMSE\": 2.04921868376074, \"R2\": -0.3577970506072152, \"Memory in Mb\": 0.9400205612182616, \"Time in s\": 86.40810799999998 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6518145768911946, \"RMSE\": 2.0285221107553344, \"R2\": -0.3417959776408932, \"Memory in Mb\": 0.8168668746948242, \"Time in s\": 90.15720199999998 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6459384626205039, \"RMSE\": 2.0083339154310043, \"R2\": -0.328972794111009, \"Memory in Mb\": 0.9120321273803712, \"Time in s\": 93.98330699999998 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6434185155449816, \"RMSE\": 1.989534246313576, \"R2\": -0.3295384550375804, \"Memory in Mb\": 0.9782476425170898, \"Time in s\": 97.888458 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6405613133684143, \"RMSE\": 1.9713432953350207, \"R2\": -0.3273099653285773, \"Memory in Mb\": 1.0593442916870115, \"Time in s\": 101.872577 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.961585696594296, \"RMSE\": 17.742218537059934, \"R2\": -404.203665453531, \"Memory in Mb\": 0.1782550811767578, \"Time in s\": 0.00837 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.889263372306967, \"RMSE\": 12.56756036562654, \"R2\": -166.2750319368382, \"Memory in Mb\": 0.1910533905029297, \"Time in s\": 0.024461 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": 
\"ChickWeights\", \"MAE\": 4.385855416063054, \"RMSE\": 10.300574999811516, \"R2\": -72.68935558725912, \"Memory in Mb\": 0.2280941009521484, \"Time in s\": 0.04853 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.447994945519213, \"RMSE\": 8.931729546653537, \"R2\": -61.80843214992921, \"Memory in Mb\": 0.2438068389892578, \"Time in s\": 0.081553 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.35253400433443, \"RMSE\": 8.24824849102831, \"R2\": -12.280953122644297, \"Memory in Mb\": 0.2916851043701172, \"Time in s\": 0.123768 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.890626377385135, \"RMSE\": 8.046318086318358, \"R2\": -4.448112248710658, \"Memory in Mb\": 0.3612346649169922, \"Time in s\": 0.176552 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.3386080839549335, \"RMSE\": 7.968508799771546, \"R2\": -2.581242138287016, \"Memory in Mb\": 0.4169635772705078, \"Time in s\": 0.2426829999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.490657725713863, \"RMSE\": 7.741140850532905, \"R2\": -2.064344233357061, \"Memory in Mb\": 0.4634113311767578, \"Time in s\": 0.322916 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.7848487383098295, \"RMSE\": 7.706963997319479, \"R2\": -1.5403773740733393, \"Memory in Mb\": 0.4844684600830078, \"Time in s\": 0.418203 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.819508170706054, \"RMSE\": 7.525876668153812, \"R2\": -0.6916040970280823, \"Memory in Mb\": 0.5139484405517578, \"Time in s\": 0.528794 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.831868630589585, \"RMSE\": 7.4067264415397105, \"R2\": -0.261880180240535, \"Memory in Mb\": 0.5271091461181641, \"Time in s\": 0.654758 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.689813959013141, \"RMSE\": 7.185628463196342, \"R2\": 0.0293225226637304, \"Memory in Mb\": 0.5355319976806641, \"Time in s\": 0.796727 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.575735548989308, \"RMSE\": 6.988996035323699, \"R2\": 0.2562234762018912, \"Memory in Mb\": 0.5445156097412109, \"Time in s\": 0.954849 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.5047654468276, \"RMSE\": 6.840884466051986, \"R2\": 0.3943998861813385, \"Memory in Mb\": 0.5490932464599609, \"Time in s\": 1.128949 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.788922390491115, \"RMSE\": 7.112527411058041, \"R2\": 0.4737467226899117, \"Memory in Mb\": 0.5519008636474609, \"Time in s\": 1.318602 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.970710142849908, \"RMSE\": 7.252656159084941, \"R2\": 0.5587960603089545, \"Memory in Mb\": 0.5291376113891602, \"Time in s\": 1.527133 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.027942129925975, \"RMSE\": 
7.276897995624471, \"R2\": 0.6363381045701173, \"Memory in Mb\": 0.5012483596801758, \"Time in s\": 1.7549830000000002 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.049038224539579, \"RMSE\": 7.28413559977408, \"R2\": 0.7101491068659838, \"Memory in Mb\": 0.3717927932739258, \"Time in s\": 2.012416 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.250417000807892, \"RMSE\": 7.635375364934655, \"R2\": 0.7374564682191251, \"Memory in Mb\": 0.3473329544067383, \"Time in s\": 2.287127 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.8380227010176045, \"RMSE\": 8.722095838276593, \"R2\": 0.7000574251839753, \"Memory in Mb\": 0.3545808792114258, \"Time in s\": 2.575553 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.044258003678201, \"RMSE\": 8.960072203538655, \"R2\": 0.7292489015049135, \"Memory in Mb\": 0.391514778137207, \"Time in s\": 2.875072 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.276524828158362, \"RMSE\": 9.354066864400572, \"R2\": 0.7625593259413487, \"Memory in Mb\": 0.4183511734008789, \"Time in s\": 3.187566 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.3739207187481774, \"RMSE\": 9.482128758285446, \"R2\": 0.7880944389945109, \"Memory in Mb\": 0.4399347305297851, \"Time in s\": 3.512786 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.979732945669537, \"RMSE\": 10.70081988088543, \"R2\": 0.7534238884125567, \"Memory in Mb\": 0.4525842666625976, \"Time in s\": 3.851311 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.440978636214291, \"RMSE\": 11.778455343325094, \"R2\": 0.7371253175053303, \"Memory in Mb\": 0.4610300064086914, \"Time in s\": 4.203753 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.722695321089189, \"RMSE\": 12.251182899165162, \"R2\": 0.7536765505554787, \"Memory in Mb\": 0.4700403213500976, \"Time in s\": 4.570129 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.706764271589622, \"RMSE\": 12.242915074252831, \"R2\": 0.7866542868859879, \"Memory in Mb\": 0.4733209609985351, \"Time in s\": 4.950063 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 8.246398153738868, \"RMSE\": 13.148346181255995, \"R2\": 0.7808464902384371, \"Memory in Mb\": 0.4770059585571289, \"Time in s\": 5.342659 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 9.3474227240372, \"RMSE\": 15.837966045238383, \"R2\": 0.7114408913609181, \"Memory in Mb\": 0.4802255630493164, \"Time in s\": 5.749268000000001 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 9.81720525198044, \"RMSE\": 16.4984558080814, \"R2\": 0.732030690326682, \"Memory in Mb\": 0.4911470413208008, \"Time in s\": 6.171356000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.029686613445753, \"RMSE\": 16.618597428793866, \"R2\": 
0.7565388422982208, \"Memory in Mb\": 0.496312141418457, \"Time in s\": 6.607662000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.183424974922596, \"RMSE\": 16.72954607175143, \"R2\": 0.7757182197717174, \"Memory in Mb\": 0.5041093826293945, \"Time in s\": 7.058783000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 11.087226456936444, \"RMSE\": 18.183293641227465, \"R2\": 0.7427163510168195, \"Memory in Mb\": 0.5065813064575195, \"Time in s\": 7.524030000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 11.611208342142504, \"RMSE\": 19.353368352464965, \"R2\": 0.7320664683002076, \"Memory in Mb\": 0.4931306838989258, \"Time in s\": 8.007171000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 12.01727910935522, \"RMSE\": 20.0211446068419, \"R2\": 0.7463056879958478, \"Memory in Mb\": 0.4681062698364258, \"Time in s\": 8.510702000000002 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 12.181393533243163, \"RMSE\": 20.04691533881536, \"R2\": 0.7628481050767555, \"Memory in Mb\": 0.4353647232055664, \"Time in s\": 9.035917 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 13.09061742270854, \"RMSE\": 21.647454224788547, \"R2\": 0.7429569103775002, \"Memory in Mb\": 0.4253015518188476, \"Time in s\": 9.583169000000002 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 13.857583790923206, \"RMSE\": 23.134723844753264, \"R2\": 0.7244169153448246, \"Memory in Mb\": 0.4488153457641601, \"Time in s\": 10.147004 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 14.415812868423222, \"RMSE\": 24.06086703427245, \"R2\": 0.7351096813998451, \"Memory in Mb\": 0.4820413589477539, \"Time in s\": 10.724926 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 14.633210713365676, \"RMSE\": 24.14605091357361, \"R2\": 0.7521125932778163, \"Memory in Mb\": 0.5092172622680664, \"Time in s\": 11.318068 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 15.133020029766048, \"RMSE\": 24.73830229788057, \"R2\": 0.7508643131645836, \"Memory in Mb\": 0.5367746353149414, \"Time in s\": 11.926425 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 16.308165732303497, \"RMSE\": 27.095114472384648, \"R2\": 0.7192825818530082, \"Memory in Mb\": 0.5905351638793945, \"Time in s\": 12.55391 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.097647362788752, \"RMSE\": 28.42746345262044, \"R2\": 0.7271490710401121, \"Memory in Mb\": 0.6096200942993164, \"Time in s\": 13.198999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.403233741388874, \"RMSE\": 28.60630060223112, \"R2\": 0.7388459944901757, \"Memory in Mb\": 0.6277017593383789, \"Time in s\": 13.862263 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.845914876737442, \"RMSE\": 29.12415998011611, 
\"R2\": 0.7453593218363636, \"Memory in Mb\": 0.6518182754516602, \"Time in s\": 14.54413 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 18.74953553887996, \"RMSE\": 30.62874049978988, \"R2\": 0.7265554777237875, \"Memory in Mb\": 0.6593713760375977, \"Time in s\": 15.245298000000002 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 19.47487310339954, \"RMSE\": 31.618292560451803, \"R2\": 0.7250121198400961, \"Memory in Mb\": 0.6093225479125977, \"Time in s\": 15.967903000000002 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 20.25384975920148, \"RMSE\": 32.7838221306768, \"R2\": 0.7341994138392889, \"Memory in Mb\": 0.6206216812133789, \"Time in s\": 16.707851 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 20.43401222967888, \"RMSE\": 32.81136182644564, \"R2\": 0.7447469767293837, \"Memory in Mb\": 0.6340532302856445, \"Time in s\": 17.46759 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 21.48271816680656, \"RMSE\": 34.486522216355176, \"R2\": 0.7267085961280999, \"Memory in Mb\": 0.6409807205200195, \"Time in s\": 18.246304 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 22.214948014440758, \"RMSE\": 35.44362766111904, \"R2\": 0.7239528137855742, \"Memory in Mb\": 0.6374177932739258, \"Time in s\": 19.046772 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 23.05953013824058, \"RMSE\": 36.58622235196899, \"R2\": 0.7279275728793118, \"Memory in Mb\": 0.6435747146606445, \"Time in s\": 19.865794 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.585839365171216, \"RMSE\": 13.881516159864546, \"R2\": -450.9893611410228, \"Memory in Mb\": 0.4141368865966797, \"Time in s\": 0.017776 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.363712565198605, \"RMSE\": 9.938671227284791, \"R2\": -38.712022235644, \"Memory in Mb\": 0.5953998565673828, \"Time in s\": 0.05778 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1246200328296143, \"RMSE\": 8.126568674182735, \"R2\": -34.79519075888522, \"Memory in Mb\": 0.7211894989013672, \"Time in s\": 0.123528 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5854221987839483, \"RMSE\": 7.070456887280416, \"R2\": -26.179962781345687, \"Memory in Mb\": 0.8254451751708984, \"Time in s\": 0.217362 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.612795341566278, \"RMSE\": 6.441452396131832, \"R2\": -13.15439617190778, \"Memory in Mb\": 0.9287128448486328, \"Time in s\": 0.340609 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5650159676967723, \"RMSE\": 5.964319967024998, \"R2\": -9.294746752112973, \"Memory in Mb\": 0.9687213897705078, \"Time in s\": 0.493962 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4246087892523565, \"RMSE\": 5.555151474129083, \"R2\": -8.228790661275799, \"Memory 
in Mb\": 0.9892597198486328, \"Time in s\": 0.677921 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2372944440918734, \"RMSE\": 5.2113826313001965, \"R2\": -6.162524907472281, \"Memory in Mb\": 1.0282306671142578, \"Time in s\": 0.8947890000000001 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1466345389477697, \"RMSE\": 4.9537003451084844, \"R2\": -4.683842281929501, \"Memory in Mb\": 0.9880685806274414, \"Time in s\": 1.151076 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9892950861944356, \"RMSE\": 4.704683592711833, \"R2\": -4.14200943628533, \"Memory in Mb\": 0.5611734390258789, \"Time in s\": 1.477073 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8962186057938932, \"RMSE\": 4.499134208858052, \"R2\": -4.072640393025983, \"Memory in Mb\": 0.3967218399047851, \"Time in s\": 1.841739 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.803503053095204, \"RMSE\": 4.314740779926486, \"R2\": -3.700467691841136, \"Memory in Mb\": 0.4504899978637695, \"Time in s\": 2.225753 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7080100607704198, \"RMSE\": 4.14999374436193, \"R2\": -3.406965132392963, \"Memory in Mb\": 0.5133523941040039, \"Time in s\": 2.631091 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6655932342441322, \"RMSE\": 4.017842470146439, \"R2\": -3.324861014485008, \"Memory in Mb\": 0.6190156936645508, \"Time in s\": 3.0590589999999995 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6385421968346507, \"RMSE\": 3.904974440220909, \"R2\": -3.01765496812087, \"Memory in Mb\": 0.7099161148071289, \"Time in s\": 3.510991 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5504197008133211, \"RMSE\": 3.781681667850968, \"R2\": -2.954527764504537, \"Memory in Mb\": 0.777043342590332, \"Time in s\": 3.987728 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5047182142648992, \"RMSE\": 3.6774767295200856, \"R2\": -2.9518368180177696, \"Memory in Mb\": 0.8162164688110352, \"Time in s\": 4.49124 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5167434089128324, \"RMSE\": 3.599404249538014, \"R2\": -2.816912258630976, \"Memory in Mb\": 0.8943338394165039, \"Time in s\": 5.022537 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.47690532831993, \"RMSE\": 3.5095784857397434, \"R2\": -2.754312484791544, \"Memory in Mb\": 0.9456682205200196, \"Time in s\": 5.582067 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4473785178172314, \"RMSE\": 3.4281100357630656, \"R2\": -2.683273334840131, \"Memory in Mb\": 1.0103578567504885, \"Time in s\": 6.172723 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.40955398667131, \"RMSE\": 3.3497394826483737, \"R2\": -2.633176800681623, \"Memory in Mb\": 
0.9896020889282228, \"Time in s\": 6.801780999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3766290707405415, \"RMSE\": 3.2780180874471507, \"R2\": -2.4163065189839363, \"Memory in Mb\": 1.0508508682250977, \"Time in s\": 7.462708999999999 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3473843223583597, \"RMSE\": 3.2105206765417766, \"R2\": -2.1076358108912725, \"Memory in Mb\": 1.1290063858032229, \"Time in s\": 8.158304 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3146601146346069, \"RMSE\": 3.145698486393554, \"R2\": -1.914776430829721, \"Memory in Mb\": 1.2007226943969729, \"Time in s\": 8.887782 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2829933656636, \"RMSE\": 3.084835387092109, \"R2\": -1.7442697801193798, \"Memory in Mb\": 1.2468271255493164, \"Time in s\": 9.6533 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.253253159364902, \"RMSE\": 3.027195393916086, \"R2\": -1.647288417049146, \"Memory in Mb\": 1.2976083755493164, \"Time in s\": 10.456286 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.221748892460797, \"RMSE\": 2.9719574458877616, \"R2\": -1.5422080467663055, \"Memory in Mb\": 1.2989130020141602, \"Time in s\": 11.302161 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2084725532442826, \"RMSE\": 2.924408360153415, \"R2\": -1.5201637785773734, \"Memory in Mb\": 1.2362565994262695, \"Time in s\": 12.194852 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1798260089354875, \"RMSE\": 2.875143807222344, \"R2\": -1.496217338554359, \"Memory in Mb\": 1.0948266983032229, \"Time in s\": 13.14623 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1697523000107772, \"RMSE\": 2.832108990371036, \"R2\": -1.3952574337441268, \"Memory in Mb\": 0.9711389541625975, \"Time in s\": 14.161435 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1550049446613664, \"RMSE\": 2.791430677298195, \"R2\": -1.29147514335963, \"Memory in Mb\": 0.9871377944946288, \"Time in s\": 15.218796 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1314798119108302, \"RMSE\": 2.748614104980716, \"R2\": -1.180190104825623, \"Memory in Mb\": 0.955540657043457, \"Time in s\": 16.316332 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1121913646065007, \"RMSE\": 2.708986615305391, \"R2\": -1.102550782410015, \"Memory in Mb\": 0.9793291091918944, \"Time in s\": 17.449759 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.096472283083769, \"RMSE\": 2.6715401075955145, \"R2\": -1.0771388394715324, \"Memory in Mb\": 1.0325212478637695, \"Time in s\": 18.614745 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0884657542544798, \"RMSE\": 2.639348967980305, \"R2\": -1.080631470653255, \"Memory in Mb\": 
1.1148195266723633, \"Time in s\": 19.813024 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0667544669913536, \"RMSE\": 2.603003384812668, \"R2\": -1.0681837571806945, \"Memory in Mb\": 1.1698732376098633, \"Time in s\": 21.046603 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0459321867530782, \"RMSE\": 2.5682868281355016, \"R2\": -1.0121733037883445, \"Memory in Mb\": 1.2092561721801758, \"Time in s\": 22.315315 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0276781933457848, \"RMSE\": 2.5353029000371494, \"R2\": -0.9820644398878616, \"Memory in Mb\": 1.2420778274536133, \"Time in s\": 23.623046 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0094276125302497, \"RMSE\": 2.50338787783782, \"R2\": -0.9411341749539492, \"Memory in Mb\": 1.3017473220825195, \"Time in s\": 24.968597 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0023537746213038, \"RMSE\": 2.475512975868657, \"R2\": -0.9153129864924484, \"Memory in Mb\": 1.3318758010864258, \"Time in s\": 26.357821 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9977235614586972, \"RMSE\": 2.450813157375169, \"R2\": -0.8978992844128302, \"Memory in Mb\": 1.3786516189575195, \"Time in s\": 27.784762 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9889333901170724, \"RMSE\": 2.4246168260512984, \"R2\": -0.8747893145143344, \"Memory in Mb\": 1.416356086730957, \"Time in s\": 29.251496 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9712423333697714, \"RMSE\": 2.3967734443047704, \"R2\": -0.8288218633529623, \"Memory in Mb\": 1.4480867385864258, \"Time in s\": 30.757563999999995 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9593965092028914, \"RMSE\": 2.3708401769846086, \"R2\": -0.7805683851951253, \"Memory in Mb\": 1.4688615798950195, \"Time in s\": 32.305297 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9500350341868696, \"RMSE\": 2.346029725301664, \"R2\": -0.7508441641708208, \"Memory in Mb\": 1.3691072463989258, \"Time in s\": 33.916341 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9362562616709212, \"RMSE\": 2.321054413049932, \"R2\": -0.7419227486050888, \"Memory in Mb\": 1.2137422561645508, \"Time in s\": 35.592854 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9293426247327622, \"RMSE\": 2.2986373047272863, \"R2\": -0.7229310397545319, \"Memory in Mb\": 1.2394838333129885, \"Time in s\": 37.31016399999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9223382441623138, \"RMSE\": 2.2770208564096435, \"R2\": -0.7083555493245846, \"Memory in Mb\": 1.2827730178833008, \"Time in s\": 39.06327099999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9155630176835056, \"RMSE\": 2.256277668055956, \"R2\": -0.7099489887647161, 
\"Memory in Mb\": 1.2986268997192385, \"Time in s\": 40.85878199999999 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9044154317407224, \"RMSE\": 2.234829366963466, \"R2\": -0.7058332444386559, \"Memory in Mb\": 1.3350114822387695, \"Time in s\": 42.69044199999999 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0524826049804687, \"Time in s\": 0.006254 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0595359802246093, \"Time in s\": 0.016967 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.0739974975585937, \"Time in s\": 0.032538 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0802268981933593, \"Time in s\": 0.0535149999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0978660583496093, \"Time in s\": 0.0804889999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.121429443359375, \"Time in s\": 0.1147379999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 43.50649350649351, \"RMSE\": 43.70978671356627, \"R2\": -106.75487995129542, \"Memory in Mb\": 0.1384239196777343, \"Time in s\": 0.1574789999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 44.21590909090909, \"RMSE\": 44.43649707984724, \"R2\": -99.97346126163, \"Memory in Mb\": 0.1540603637695312, \"Time in s\": 0.209356 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 45.05050505050505, \"RMSE\": 45.309262771858165, \"R2\": -86.8022342468144, \"Memory in Mb\": 0.1602935791015625, \"Time in s\": 0.271082 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16363636363636, \"RMSE\": 46.52487115902242, \"R2\": -63.64797006437341, \"Memory in Mb\": 0.164306640625, \"Time in s\": 0.3431619999999999 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 47.21487603305785, \"RMSE\": 47.67304278378361, \"R2\": -51.27707184490422, \"Memory in Mb\": 0.1650009155273437, \"Time in s\": 0.4251659999999999 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 48.29545454545455, \"RMSE\": 
48.843054157105485, \"R2\": -43.84882422437649, \"Memory in Mb\": 0.165863037109375, \"Time in s\": 0.5168919999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 49.44055944055945, \"RMSE\": 50.100318941519305, \"R2\": -37.220279564063546, \"Memory in Mb\": 0.1322584152221679, \"Time in s\": 0.622087 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 50.532467532467535, \"RMSE\": 51.29137544271156, \"R2\": -33.04474826644667, \"Memory in Mb\": 0.1404676437377929, \"Time in s\": 0.736412 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 51.690909090909095, \"RMSE\": 52.61253451297311, \"R2\": -27.795548438273773, \"Memory in Mb\": 0.1464834213256836, \"Time in s\": 0.8597999999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 53.00568181818182, \"RMSE\": 54.11860921749895, \"R2\": -23.566226925646237, \"Memory in Mb\": 0.1528844833374023, \"Time in s\": 0.99286 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 54.41176470588235, \"RMSE\": 55.733754017636336, \"R2\": -20.33250305682894, \"Memory in Mb\": 0.1579313278198242, \"Time in s\": 1.135584 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 56.02525252525252, \"RMSE\": 57.635786091488654, \"R2\": -17.146924852486976, \"Memory in Mb\": 0.1600141525268554, \"Time in s\": 1.287864 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 57.5645933014354, \"RMSE\": 59.46206220864915, \"R2\": -14.922837840066968, \"Memory in Mb\": 0.1255407333374023, \"Time in s\": 1.454924 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 58.69090909090908, \"RMSE\": 60.81327606250582, \"R2\": -13.581197962556498, \"Memory in Mb\": 0.1323118209838867, \"Time in s\": 1.63074 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 60.25541125541125, \"RMSE\": 62.66764529032318, \"R2\": -12.244451024360147, \"Memory in Mb\": 0.1370038986206054, \"Time in s\": 1.815352 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 62.17355371900826, \"RMSE\": 65.06963847478845, \"R2\": -10.489760184397111, \"Memory in Mb\": 0.1415891647338867, \"Time in s\": 2.009009 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 63.93675889328063, \"RMSE\": 67.17295239601157, \"R2\": -9.634560128382748, \"Memory in Mb\": 0.1452207565307617, \"Time in s\": 2.211595 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 65.10606060606062, \"RMSE\": 68.57980310513724, \"R2\": -9.127665748505592, \"Memory in Mb\": 0.1458845138549804, \"Time in s\": 2.423132 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 
66.61454545454548, \"RMSE\": 70.46451073219248, \"R2\": -8.408339126213217, \"Memory in Mb\": 0.1459379196166992, \"Time in s\": 2.643805 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 68.48951048951052, \"RMSE\": 72.8020594498525, \"R2\": -7.6983532427125105, \"Memory in Mb\": 0.1282644271850586, \"Time in s\": 2.877713 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 70.55218855218858, \"RMSE\": 75.3669362796119, \"R2\": -7.08492451355157, \"Memory in Mb\": 0.1300420761108398, \"Time in s\": 3.120317 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 72.39285714285718, \"RMSE\": 77.65033596401675, \"R2\": -6.643510181414674, \"Memory in Mb\": 0.1343069076538086, \"Time in s\": 3.37158 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 73.45454545454551, \"RMSE\": 79.15086186624424, \"R2\": -6.206879640065647, \"Memory in Mb\": 0.1407041549682617, \"Time in s\": 3.631653 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 75.77878787878792, \"RMSE\": 82.20832738177494, \"R2\": -5.653192449779911, \"Memory in Mb\": 0.1449460983276367, \"Time in s\": 3.900744 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 77.92375366568919, \"RMSE\": 84.89106353805269, \"R2\": -5.352795814687307, \"Memory in Mb\": 0.1466054916381836, \"Time in s\": 4.1790140000000005 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 80.04545454545458, \"RMSE\": 87.49376601169416, \"R2\": -5.134510311668016, \"Memory in Mb\": 0.1466588973999023, \"Time in s\": 4.46626 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 80.99724517906337, \"RMSE\": 88.57562798692558, \"R2\": -5.105139086016474, \"Memory in Mb\": 0.1461553573608398, \"Time in s\": 4.762509 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 82.77807486631018, \"RMSE\": 90.83029071422122, \"R2\": -4.901675845817959, \"Memory in Mb\": 0.1569280624389648, \"Time in s\": 5.06867 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 85.1766233766234, \"RMSE\": 93.99517810235533, \"R2\": -4.591702735915359, \"Memory in Mb\": 0.1629590988159179, \"Time in s\": 5.384646 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 87.26767676767678, \"RMSE\": 96.48964983485284, \"R2\": -4.494054297851511, \"Memory in Mb\": 0.1698560714721679, \"Time in s\": 5.710516 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 89.00737100737103, \"RMSE\": 98.71879502607636, \"R2\": -4.345544683073043, \"Memory in Mb\": 0.1741170883178711, \"Time in s\": 6.048375 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 
90.57416267942588, \"RMSE\": 100.72635724110243, \"R2\": -4.224084264201084, \"Memory in Mb\": 0.1756696701049804, \"Time in s\": 6.396311 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 93.12121212121215, \"RMSE\": 104.19735398794236, \"R2\": -3.967717840349581, \"Memory in Mb\": 0.1753263473510742, \"Time in s\": 6.754458 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 95.41818181818184, \"RMSE\": 107.03565676064125, \"R2\": -3.8710119659250095, \"Memory in Mb\": 0.1580381393432617, \"Time in s\": 7.124987 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 97.16629711751663, \"RMSE\": 109.07665280092142, \"R2\": -3.843505105397095, \"Memory in Mb\": 0.1671514511108398, \"Time in s\": 7.505263 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 98.71645021645024, \"RMSE\": 111.1763643167196, \"R2\": -3.72620239405422, \"Memory in Mb\": 0.1737470626831054, \"Time in s\": 7.89547 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 101.54122621564484, \"RMSE\": 115.2058457378686, \"R2\": -3.48124047566686, \"Memory in Mb\": 0.1785993576049804, \"Time in s\": 8.295795 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 103.77066115702482, \"RMSE\": 117.90601559037044, \"R2\": -3.4365483842712585, \"Memory in Mb\": 0.1823682785034179, \"Time in s\": 8.706368 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 106.02424242424244, \"RMSE\": 120.71525892518191, \"R2\": -3.37467008920777, \"Memory in Mb\": 0.1661062240600586, \"Time in s\": 9.13027 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 107.31620553359684, \"RMSE\": 122.26004165941237, \"R2\": -3.356924458603192, \"Memory in Mb\": 0.1709508895874023, \"Time in s\": 9.564033 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 109.39651837524178, \"RMSE\": 124.91233289427784, \"R2\": -3.291877964737682, \"Memory in Mb\": 0.1725950241088867, \"Time in s\": 10.007821 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 112.36553030303028, \"RMSE\": 129.1106745698386, \"R2\": -3.1225038051323804, \"Memory in Mb\": 0.1781835556030273, \"Time in s\": 10.461944 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 114.52504638218922, \"RMSE\": 131.65752925403248, \"R2\": -3.109734667916423, \"Memory in Mb\": 0.1829481124877929, \"Time in s\": 10.926196 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 115.89999999999996, \"RMSE\": 133.35909826820617, \"R2\": -3.0866973064470367, \"Memory in Mb\": 0.1825780868530273, \"Time in s\": 11.400602999999998 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": 
\"ChickWeights\", \"MAE\": 117.86452762923346, \"RMSE\": 135.8046463151548, \"R2\": -3.0526234314410727, \"Memory in Mb\": 0.1822462081909179, \"Time in s\": 11.885435 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 120.54020979020974, \"RMSE\": 139.4624607986965, \"R2\": -2.953338846956928, \"Memory in Mb\": 0.1833868026733398, \"Time in s\": 12.380554 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 43.8732195, \"RMSE\": 43.87807788634269, \"R2\": -4514.954899312423, \"Memory in Mb\": 0.1279449462890625, \"Time in s\": 0.015909 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.4932955, \"RMSE\": 42.52255283421693, \"R2\": -725.9491167623446, \"Memory in Mb\": 0.1855659484863281, \"Time in s\": 0.053644 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.2167785, \"RMSE\": 42.2386240157387, \"R2\": -966.0073736019044, \"Memory in Mb\": 0.2224998474121093, \"Time in s\": 0.118601 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.975705625, \"RMSE\": 41.99760868559829, \"R2\": -957.9655948743646, \"Memory in Mb\": 0.2547683715820312, \"Time in s\": 0.215522 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.37550450000001, \"RMSE\": 41.410913785433536, \"R2\": -583.9966399141301, \"Memory in Mb\": 0.2853622436523437, \"Time in s\": 0.349036 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.936110000000006, \"RMSE\": 40.97829382197767, \"R2\": -484.9611418859003, \"Memory in Mb\": 0.2942886352539062, \"Time in s\": 0.520816 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.6885472857143, \"RMSE\": 40.72961738075088, \"R2\": -495.1050461477588, \"Memory in Mb\": 0.2989387512207031, \"Time in s\": 0.713599 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.35105437500001, \"RMSE\": 40.39801158334292, \"R2\": -429.4078677932073, \"Memory in Mb\": 0.2328748703002929, \"Time in s\": 0.934381 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00981655555555, \"RMSE\": 40.06373388340122, \"R2\": -370.7794659133543, \"Memory in Mb\": 0.2508001327514648, \"Time in s\": 1.175315 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.80633095, \"RMSE\": 39.860362966711, \"R2\": -368.1089073295326, \"Memory in Mb\": 0.1596212387084961, \"Time in s\": 1.445583 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.727043136363626, \"RMSE\": 39.77723500009918, \"R2\": -395.5019807293188, \"Memory in Mb\": 0.1831541061401367, \"Time in s\": 1.734898 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 
39.56323079166665, \"RMSE\": 39.61325406766278, \"R2\": -395.19837684116754, \"Memory in Mb\": 0.1905164718627929, \"Time in s\": 2.043551 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.42014538461535, \"RMSE\": 39.46968290441584, \"R2\": -397.63185900832246, \"Memory in Mb\": 0.2020750045776367, \"Time in s\": 2.371621 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.33200189285712, \"RMSE\": 39.37942345737111, \"R2\": -414.4560159350036, \"Memory in Mb\": 0.2260313034057617, \"Time in s\": 2.720012 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.18435719999999, \"RMSE\": 39.23275803924839, \"R2\": -404.5402138221895, \"Memory in Mb\": 0.2437810897827148, \"Time in s\": 3.088623 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.13568690624999, \"RMSE\": 39.1818628962716, \"R2\": -423.5167725219512, \"Memory in Mb\": 0.2581815719604492, \"Time in s\": 3.477665 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.14620944117645, \"RMSE\": 39.18989510023786, \"R2\": -447.7943063391533, \"Memory in Mb\": 0.2493734359741211, \"Time in s\": 3.892527 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.24072974999997, \"RMSE\": 39.28395553300239, \"R2\": -453.6543473793619, \"Memory in Mb\": 0.2643537521362304, \"Time in s\": 4.328145999999999 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.29597665789471, \"RMSE\": 39.33769921546023, \"R2\": -470.6701690846498, \"Memory in Mb\": 0.2747945785522461, \"Time in s\": 4.784845 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.35730624999997, \"RMSE\": 39.39781946688104, \"R2\": -485.4842825426507, \"Memory in Mb\": 0.2898855209350586, \"Time in s\": 5.262664 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.40549083333331, \"RMSE\": 39.44465897881697, \"R2\": -502.7799504226928, \"Memory in Mb\": 0.2975950241088867, \"Time in s\": 5.761624 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.49730674999998, \"RMSE\": 39.53710368662846, \"R2\": -495.9856416828035, \"Memory in Mb\": 0.3090314865112304, \"Time in s\": 6.281948 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.61474728260867, \"RMSE\": 39.65658853240579, \"R2\": -473.14358309219216, \"Memory in Mb\": 0.3255758285522461, \"Time in s\": 6.824272 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.71032456249997, \"RMSE\": 39.75304758270976, \"R2\": -464.4916761787406, \"Memory in Mb\": 0.3411321640014648, \"Time in s\": 7.389190999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": 
\"TrumpApproval\", \"MAE\": 39.80313951999997, \"RMSE\": 39.84667590965187, \"R2\": -456.8750824508669, \"Memory in Mb\": 0.2797002792358398, \"Time in s\": 7.981206999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.87354713461536, \"RMSE\": 39.916931033645376, \"R2\": -459.2932847271911, \"Memory in Mb\": 0.2906713485717773, \"Time in s\": 8.594204 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.94649651851849, \"RMSE\": 39.98996046818772, \"R2\": -459.28610565666287, \"Memory in Mb\": 0.2965841293334961, \"Time in s\": 9.232432 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.97606614285712, \"RMSE\": 40.018487723609816, \"R2\": -470.926187706672, \"Memory in Mb\": 0.3019857406616211, \"Time in s\": 9.891908 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00338510344825, \"RMSE\": 40.044755101652726, \"R2\": -483.2331705341176, \"Memory in Mb\": 0.3056249618530273, \"Time in s\": 10.572818 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.07393431666663, \"RMSE\": 40.11569326301364, \"R2\": -479.5746686678817, \"Memory in Mb\": 0.3106412887573242, \"Time in s\": 11.275213 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.1459417741935, \"RMSE\": 40.18827077358568, \"R2\": -473.96334667177865, \"Memory in Mb\": 0.3147268295288086, \"Time in s\": 11.999135999999998 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.21943815624997, \"RMSE\": 40.26249426545423, \"R2\": -466.8085709746123, \"Memory in Mb\": 0.3194303512573242, \"Time in s\": 12.744586999999996 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.28296777272724, \"RMSE\": 40.32626722721455, \"R2\": -464.9172853497744, \"Memory in Mb\": 0.3247060775756836, \"Time in s\": 13.511768999999996 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31998279411761, \"RMSE\": 40.36256991107017, \"R2\": -473.1325264408024, \"Memory in Mb\": 0.3289289474487304, \"Time in s\": 14.300919999999998 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31359012857138, \"RMSE\": 40.35509446667054, \"R2\": -485.40526703956544, \"Memory in Mb\": 0.2762861251831054, \"Time in s\": 15.116749 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31730695833329, \"RMSE\": 40.357915759594896, \"R2\": -496.1610725544049, \"Memory in Mb\": 0.2866239547729492, \"Time in s\": 15.953537 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.36653568918915, \"RMSE\": 40.40711941642496, \"R2\": -497.0742803710164, \"Memory in Mb\": 0.2274045944213867, \"Time in s\": 16.820776 }, { \"step\": 760, \"track\": \"Regression\", 
\"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.40314367105261, \"RMSE\": 40.443256311482514, \"R2\": -503.3712175162706, \"Memory in Mb\": 0.2380895614624023, \"Time in s\": 17.708028 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.44545064102563, \"RMSE\": 40.48534274444009, \"R2\": -506.6856716110208, \"Memory in Mb\": 0.2534551620483398, \"Time in s\": 18.615628 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.47854825, \"RMSE\": 40.518050685964006, \"R2\": -512.1052117095793, \"Memory in Mb\": 0.2672185897827148, \"Time in s\": 19.543925 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.50894034146341, \"RMSE\": 40.5479845946661, \"R2\": -518.5068774177179, \"Memory in Mb\": 0.2752447128295898, \"Time in s\": 20.493024 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.5406558690476, \"RMSE\": 40.57931089736599, \"R2\": -524.140575335229, \"Memory in Mb\": 0.2841558456420898, \"Time in s\": 21.463054 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.58371181395347, \"RMSE\": 40.62239247493601, \"R2\": -524.3496319016275, \"Memory in Mb\": 0.2900037765502929, \"Time in s\": 22.454245 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.62855514772725, \"RMSE\": 40.66738601007716, \"R2\": -522.897851512946, \"Memory in Mb\": 0.2707319259643554, \"Time in s\": 23.471721 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.664104233333326, \"RMSE\": 40.702738445808535, \"R2\": -526.020768835918, \"Memory in Mb\": 0.2802000045776367, \"Time in s\": 24.510012 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.68274704347825, \"RMSE\": 40.72073961991632, \"R2\": -535.1540147256861, \"Memory in Mb\": 0.2901716232299804, \"Time in s\": 25.569536 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.70972619148935, \"RMSE\": 40.74737437775791, \"R2\": -540.4099749760601, \"Memory in Mb\": 0.2975721359252929, \"Time in s\": 26.655616 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.73400636458332, \"RMSE\": 40.771242977826994, \"R2\": -546.7118652484228, \"Memory in Mb\": 0.3080549240112304, \"Time in s\": 27.763054 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.74031829795916, \"RMSE\": 40.77684015923968, \"R2\": -557.5026042066913, \"Memory in Mb\": 0.3134031295776367, \"Time in s\": 28.892281 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.75359492299998, \"RMSE\": 40.78950075300399, \"R2\": -567.2567645513548, \"Memory in Mb\": 0.3166418075561523, \"Time in s\": 30.043213999999995 }, { \"step\": 11, \"track\": 
\"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.028772 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.079044 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.149857 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.237747 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.338425 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.452041 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.46441536424646, \"RMSE\": 43.6630525281676, \"R2\": -106.5245816691109, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.578307 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.36686847531974, \"RMSE\": 43.58986852756654, \"R2\": -96.16251088961316, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.71715 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 39.3777319762956, \"RMSE\": 41.29293125499619, \"R2\": -71.92610223487543, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.8686729999999999 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 36.38865389064824, \"RMSE\": 39.46494228501369, \"R2\": -45.51654802750055, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.032976 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 33.47571311046551, \"RMSE\": 37.6638192516392, \"R2\": -31.6297858502766, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.209623 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 30.829458151969483, \"RMSE\": 36.06523958622657, \"R2\": -23.452489437409387, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.398231 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 28.6940047418964, \"RMSE\": 34.66464981164902, \"R2\": -17.297279097398025, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.59888 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 26.901670019158168, \"RMSE\": 33.42439644535526, \"R2\": -13.457346308406352, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.811498 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"River 
MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 25.73601455188179, \"RMSE\": 32.42918896219868, \"R2\": -9.940044331027543, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.036514 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.44416894616589, \"RMSE\": 31.465121114844862, \"R2\": -7.304318935113031, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.273516 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 23.31106527863985, \"RMSE\": 30.560727675640003, \"R2\": -5.414053903472148, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.522216 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 22.2497659881735, \"RMSE\": 29.72926965926448, \"R2\": -3.828220321961205, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.782587 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.58385771370322, \"RMSE\": 29.10991604615937, \"R2\": -2.8161237021822814, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.05463 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.489125733317938, \"RMSE\": 28.89967848441583, \"R2\": -2.2929294700983545, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.338483 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.96077526896472, \"RMSE\": 28.36167354440183, \"R2\": -1.7127571457874735, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.633999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.684837308385998, \"RMSE\": 27.97322591752617, \"R2\": -1.123436519018493, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.941181 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.10836609318901, \"RMSE\": 27.433189641281587, \"R2\": -0.7737126718723288, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 4.259926 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.477468707260805, \"RMSE\": 27.77140109557264, \"R2\": -0.6607814047377214, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 4.590346 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.61134029899391, \"RMSE\": 28.056502395486245, \"R2\": -0.491554549366604, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 4.932228 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.562403371381592, \"RMSE\": 27.861367163045813, \"R2\": -0.2739563786996055, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 5.285550000000001 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.2802158532438, \"RMSE\": 27.63742758295081, \"R2\": -0.0872000961905188, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 5.650314000000001 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.449355350398, \"RMSE\": 27.920149137510645, \"R2\": 0.0118073081415535, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.026518000000001 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"River MLP\", 
\"dataset\": \"ChickWeights\", \"MAE\": 21.50134169290969, \"RMSE\": 30.009678165732637, \"R2\": -0.0359973595319296, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.414170000000001 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.657358056011915, \"RMSE\": 30.050226639054536, \"R2\": 0.1110159573580076, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.813245000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.886687938329302, \"RMSE\": 30.333453058468624, \"R2\": 0.1888808568866784, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 7.223789000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.886047000767118, \"RMSE\": 30.34044931799507, \"R2\": 0.2623171341711791, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 7.645758000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 22.895757506339034, \"RMSE\": 32.0447958992957, \"R2\": 0.2009350122171253, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.07951 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 23.75249563906416, \"RMSE\": 33.838912051163845, \"R2\": 0.1808815149406113, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.524693000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.560900349589296, \"RMSE\": 34.753482086388026, \"R2\": 0.2355843412503995, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.981311000000002 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.352453583923488, \"RMSE\": 34.473609026370774, \"R2\": 0.2986981406442135, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 9.449373 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 25.719275464870663, \"RMSE\": 37.04357441923364, \"R2\": 0.2473067090757558, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 9.928858000000002 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 27.096550121003176, \"RMSE\": 39.902565812758986, \"R2\": 0.1801666848023049, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 10.419752000000004 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 28.343681876579897, \"RMSE\": 42.0624752869508, \"R2\": 0.1904695440990398, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 10.922067000000002 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 29.341897763641, \"RMSE\": 43.65194027092424, \"R2\": 0.1898427093039769, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 11.435807000000002 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 30.67273802938304, \"RMSE\": 45.6370688200433, \"R2\": 0.1521253661768283, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 11.960959000000004 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 32.56224145473062, \"RMSE\": 49.915337115223885, \"R2\": 0.0473016837570096, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 12.497734000000005 }, 
{ \"step\": 473, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 34.43415680949522, \"RMSE\": 53.78695495416363, \"R2\": 0.0232056621878617, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 13.045956000000004 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 37.59302308486845, \"RMSE\": 59.43178585545712, \"R2\": -0.1272256336981969, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 13.605608000000004 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 39.81029832190851, \"RMSE\": 63.2563754686467, \"R2\": -0.2012394142017446, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 14.176690000000004 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 40.81849028215024, \"RMSE\": 64.7117163856453, \"R2\": -0.2206096222811289, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 14.759208000000005 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 42.30053541895924, \"RMSE\": 66.69347450894223, \"R2\": -0.2234984190364006, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 15.353157000000005 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.87998520264807, \"RMSE\": 69.22658128427689, \"R2\": -0.1851751428384946, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 15.958328000000003 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 45.090482136166365, \"RMSE\": 70.91807128614899, \"R2\": -0.1924391171188431, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 16.574693000000003 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16603381063717, \"RMSE\": 72.79977252028388, \"R2\": -0.2178315347358042, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 17.202244000000004 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 47.99895001321041, \"RMSE\": 75.49864048778444, \"R2\": -0.2525216922711382, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 17.841166000000005 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 49.57826542612398, \"RMSE\": 77.90262570405233, \"R2\": -0.2335409846027114, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 18.491295000000004 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.45103241731707, \"RMSE\": 33.23585723529438, \"R2\": -2590.0045530336465, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.03648 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 18.3406114455606, \"RMSE\": 24.1628558112126, \"R2\": -233.72636807636488, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.100456 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.012302795940927, \"RMSE\": 20.27429916426714, \"R2\": -221.7932161673039, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.190991 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.49107264681655, \"RMSE\": 17.720640796103456, \"R2\": -169.73114207921216, 
\"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.307239 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.726608796116103, \"RMSE\": 15.913172800750536, \"R2\": -85.38479390162912, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.4491740000000001 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.642897473622819, \"RMSE\": 14.610205599232696, \"R2\": -60.77410396737057, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.6162740000000001 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.627861449957184, \"RMSE\": 13.547130531753504, \"R2\": -53.88423494401591, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.8076440000000001 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.825616478903957, \"RMSE\": 12.68640242718137, \"R2\": -41.44604107616888, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.023068 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.298325880816026, \"RMSE\": 11.99455373608459, \"R2\": -32.32351025206219, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.262663 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.791221405645649, \"RMSE\": 11.389261045241726, \"R2\": -29.134439780883504, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.526002 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.429217208457164, \"RMSE\": 10.877504011291329, \"R2\": -28.650681734644422, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.812923 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.060396678997669, \"RMSE\": 10.421476408956162, \"R2\": -26.4214333861576, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 2.123256 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.7577579662422185, \"RMSE\": 10.01970369652003, \"R2\": -24.68943115642662, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 2.456738 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.47898479527949, \"RMSE\": 9.659781964354623, \"R2\": -23.99890538466576, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 2.813161 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.231628573868652, \"RMSE\": 9.33546881214548, \"R2\": -21.961936464609707, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 3.192823 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.032288217093912, \"RMSE\": 9.046394584789834, \"R2\": -21.629541054113563, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 3.5954460000000004 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.8351551756309497, \"RMSE\": 8.779241117397504, \"R2\": -21.522318216666985, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.021045 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.660607443401045, \"RMSE\": 8.536652519215579, \"R2\": 
-20.469707840891584, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.469678 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.505270605120577, \"RMSE\": 8.311598150636895, \"R2\": -20.056664379019765, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.941134 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.360453882251005, \"RMSE\": 8.10307441673556, \"R2\": -19.578991923208985, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 5.435716 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.230603986740656, \"RMSE\": 7.909485894846834, \"R2\": -19.256340082684435, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 5.953156 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1198347345540465, \"RMSE\": 7.731010347257149, \"R2\": -18.002320884204543, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 6.493473 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.010353198494384, \"RMSE\": 7.562877110682495, \"R2\": -16.244605442867506, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 7.056611999999999 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9023580604284582, \"RMSE\": 7.404341650720478, \"R2\": -15.148937801753288, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 7.642589999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.8120368518109427, \"RMSE\": 7.256642134427339, \"R2\": -14.185679409391112, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 8.251631 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.722217494708863, \"RMSE\": 7.11705765703134, \"R2\": -13.632593906904136, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 8.883484 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6379600337769933, \"RMSE\": 6.984812005374008, \"R2\": -13.042206620144157, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 9.537691 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.573532085936036, \"RMSE\": 6.862931118690264, \"R2\": -12.879442173523902, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 10.214296 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.508294944253365, \"RMSE\": 6.745457437445754, \"R2\": -12.739978855164816, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 10.913296 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4431260057234407, \"RMSE\": 6.633435680824833, \"R2\": -12.140422111767563, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 11.634954 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.386700775516181, \"RMSE\": 6.528113194197335, \"R2\": -11.53247390884172, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 12.379039999999998 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3242014075410804, \"RMSE\": 6.426013651213371, 
\"R2\": -10.916538218303476, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 13.145560999999995 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2685968162113417, \"RMSE\": 6.328883008296775, \"R2\": -10.475904112331508, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 13.934486999999995 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2077966601853354, \"RMSE\": 6.235306598260896, \"R2\": -10.31508330333358, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 14.745848999999998 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1581493026656715, \"RMSE\": 6.146460408814674, \"R2\": -10.283704638807178, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 15.579853999999996 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1027567473920863, \"RMSE\": 6.060643545524585, \"R2\": -10.211846444382044, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 16.436290999999997 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051121816682194, \"RMSE\": 5.978350933609188, \"R2\": -9.90287777753909, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 17.315177999999996 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.003155134641025, \"RMSE\": 5.899358540561457, \"R2\": -9.731678337792127, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 18.216514 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9581628436412764, \"RMSE\": 5.823548668044576, \"R2\": -9.504483044737295, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 19.140319 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.920522972774301, \"RMSE\": 5.751090425279772, \"R2\": -9.337362153707344, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 20.086783 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.884378546616626, \"RMSE\": 5.681435320930898, \"R2\": -9.199265318679966, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 21.055682 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8462172344878476, \"RMSE\": 5.613649762186833, \"R2\": -9.049787223051387, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 22.047027 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8114873001009923, \"RMSE\": 5.548504609880171, \"R2\": -8.800976301633217, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 23.060786 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7737427907909609, \"RMSE\": 5.485176730961097, \"R2\": -8.530931568707373, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 24.096958 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7378742216236305, \"RMSE\": 5.423968591721732, \"R2\": -8.358684442608821, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 25.155795 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 
1.7058202733924563, \"RMSE\": 5.364926267836746, \"R2\": -8.30648673172487, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 26.237104 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6771033983092818, \"RMSE\": 5.307938337650396, \"R2\": -8.187106100071555, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 27.340904 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6470899020587193, \"RMSE\": 5.252615019014566, \"R2\": -8.09065943244505, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 28.467203 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6179664800066769, \"RMSE\": 5.198922142498095, \"R2\": -8.078721464420347, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 29.615952 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.591389315944533, \"RMSE\": 5.1470018023734205, \"R2\": -8.048080908594342, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 30.787287 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.664574314574316, \"RMSE\": 12.7079745317607, \"R2\": -206.87879383707747, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.002154 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.767694704637076, \"RMSE\": 9.018587183866767, \"R2\": -85.14025986830408, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.005944 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.3093367298127023, \"RMSE\": 7.420500566500976, \"R2\": -37.24267181629702, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.010037 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 1.892363968348808, \"RMSE\": 6.441521936619904, \"R2\": -31.668094594906044, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.014469 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.1129412159858934, \"RMSE\": 6.114058653243701, \"R2\": -6.297346571779499, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.019181 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.832849782567835, \"RMSE\": 6.236602142425367, \"R2\": -2.2730130120415795, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.024182 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4069290990236856, \"RMSE\": 6.402381882180361, \"R2\": -1.3118663438824, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.029453 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 3.650377971160808, \"RMSE\": 6.321189272940957, \"R2\": -1.043267371916866, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.034986 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.035631404360372, \"RMSE\": 6.4483291916176695, \"R2\": -0.7783857772357967, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.040781 }, { 
\"step\": 110, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.693189868957898, \"RMSE\": 7.0697740144659305, \"R2\": -0.4927792786841307, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.046845 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 5.274396860168236, \"RMSE\": 7.6542276724395, \"R2\": -0.3476225254437259, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.053171 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 5.875758254207378, \"RMSE\": 8.194624755054596, \"R2\": -0.2624191661321591, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.05976 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 6.530760796045927, \"RMSE\": 8.870097879563003, \"R2\": -0.1980355424044948, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.066612 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 7.121466111912466, \"RMSE\": 9.458403141043558, \"R2\": -0.1577027852151795, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.073726 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 7.772438504082036, \"RMSE\": 10.375670403553157, \"R2\": -0.1198999930450892, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.081109 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 8.565827130563894, \"RMSE\": 11.410434180005833, \"R2\": -0.0920676568626532, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.088752 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 9.429958588641576, \"RMSE\": 12.495061319237752, \"R2\": -0.0722153171628203, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.096665 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 10.47731537859646, \"RMSE\": 13.900491647656429, \"R2\": -0.0555502703757588, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.104851 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 11.43172675762076, \"RMSE\": 15.229123619635446, \"R2\": -0.0444565128716372, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.1133 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 11.97432098008114, \"RMSE\": 16.22368260926648, \"R2\": -0.0377560869847111, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.122025 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 12.9382196746461, \"RMSE\": 17.489503190785292, \"R2\": -0.0315781972827118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.131017 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 14.229204186206864, \"RMSE\": 19.43725798629848, \"R2\": -0.0252367718674193, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.140272 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"[baseline] 
Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 15.339413196393396, \"RMSE\": 20.82023831254592, \"R2\": -0.0216497893038387, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.149788 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 15.948617107030818, \"RMSE\": 21.75817315507082, \"R2\": -0.0194401851240946, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.159572 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 16.794155127707494, \"RMSE\": 23.16724301729152, \"R2\": -0.0169996193237813, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.169617 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 17.990009992534457, \"RMSE\": 24.865985915258104, \"R2\": -0.0147547133955299, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.17992 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 19.34919450213405, \"RMSE\": 26.67620929760368, \"R2\": -0.0128904565600072, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.190491 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 20.46881241431745, \"RMSE\": 28.248013022827838, \"R2\": -0.011537481517321, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.201321 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 20.993702124162965, \"RMSE\": 29.63814114349949, \"R2\": -0.0105036731193923, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.212414 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 22.586872779548436, \"RMSE\": 32.01796640002603, \"R2\": -0.0092202379520505, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.223765 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 23.97345887210737, \"RMSE\": 33.821533603903084, \"R2\": -0.0083877019037323, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.235368 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 25.315991788770976, \"RMSE\": 35.461698606860665, \"R2\": -0.0077313021586467, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.247225 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 25.615062978866305, \"RMSE\": 35.981300981590465, \"R2\": -0.0074437490312051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.259337 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 26.673321526932543, \"RMSE\": 37.51836715700961, \"R2\": -0.0069358461242559, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.271713 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 28.27694482780972, \"RMSE\": 39.8753298933956, \"R2\": -0.0063325109838794, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.2843549999999999 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", 
\"MAE\": 29.55612496209691, \"RMSE\": 41.28848705945016, \"R2\": -0.0059801818919071, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.2972059999999999 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 30.56167711268285, \"RMSE\": 42.81802042618151, \"R2\": -0.0056467231500465, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3102509999999999 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 31.39346669137945, \"RMSE\": 44.18765357092498, \"R2\": -0.0053697143301307, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3234889999999999 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 33.10612890637694, \"RMSE\": 46.865579751152914, \"R2\": -0.0049663660706051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3369199999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 34.54914638861108, \"RMSE\": 48.61167278858254, \"R2\": -0.0047161238549726, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3505489999999999 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 35.43263419295921, \"RMSE\": 49.67507127970072, \"R2\": -0.0045536938071879, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3643729999999999 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 36.308550382896186, \"RMSE\": 51.2507761435036, \"R2\": -0.0043573774895468, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3783909999999999 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 38.26330298063241, \"RMSE\": 54.53225049728104, \"R2\": -0.0040516612048955, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3926029999999999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 39.59866234800828, \"RMSE\": 56.08659790201894, \"R2\": -0.0039023944795495, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4070059999999999 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 40.94697327298068, \"RMSE\": 57.823326559810994, \"R2\": -0.0037535911132069, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4215999999999999 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 41.42384714758024, \"RMSE\": 58.67984594201592, \"R2\": -0.0036652347211194, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4363849999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 42.72663002099646, \"RMSE\": 60.40151056768402, \"R2\": -0.0035345422299792, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4513639999999999 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 44.77321528369677, \"RMSE\": 63.69509749878913, \"R2\": -0.0033415055563215, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4665329999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": 
\"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 45.99579764939489, \"RMSE\": 65.0494992510053, \"R2\": -0.003252609562637, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4818929999999999 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 46.57020777663759, \"RMSE\": 66.07332710234044, \"R2\": -0.0031815200825582, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4974459999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 47.75825760640621, \"RMSE\": 67.5643396193493, \"R2\": -0.0030950009187136, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.5131919999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 49.49138874897682, \"RMSE\": 70.24569214117749, \"R2\": -0.0029719424061886, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.5291269999999999 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695184981652336, \"RMSE\": 9.807184976514188, \"R2\": -224.6021011118197, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.003379 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3994713447037435, \"RMSE\": 7.102066178895935, \"R2\": -19.27845129783118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0081789999999999 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8170744682035584, \"RMSE\": 5.815253847056423, \"R2\": -17.329373299766118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0135909999999999 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.604995404573344, \"RMSE\": 5.081770494168446, \"R2\": -13.040545957103586, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0195849999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824259078948539, \"RMSE\": 4.70488333223354, \"R2\": -6.5512954222403845, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0262549999999999 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918744608116588, \"RMSE\": 4.412336880489357, \"R2\": -4.634185300646759, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.033502 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8761207739327503, \"RMSE\": 4.13187920011476, \"R2\": -4.105616799680584, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.041316 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.961232939518506, \"RMSE\": 3.976173487274506, \"R2\": -3.1695661963674864, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0496949999999999 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.066134597500757, \"RMSE\": 3.873731518767916, \"R2\": -2.4756944369169624, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0586379999999999 }, { \"step\": 200, 
\"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051125997923389, \"RMSE\": 3.731810291394655, \"R2\": -2.23527456693896, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0682329999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.94095193468414, \"RMSE\": 3.56902990398404, \"R2\": -2.19210047340805, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.078397 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9366756524315063, \"RMSE\": 3.4612902974772624, \"R2\": -2.024876884626847, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.089124 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9250039777458068, \"RMSE\": 3.363327951159923, \"R2\": -1.8945640461454525, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.1004139999999999 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8726934920539136, \"RMSE\": 3.257010428159885, \"R2\": -1.8420037280027224, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.11227 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8907476896224935, \"RMSE\": 3.1958821895815714, \"R2\": -1.6910252267675163, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.124747 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.819623890420079, \"RMSE\": 3.103812605138666, \"R2\": -1.663886258690169, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.137792 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7396293145937214, \"RMSE\": 3.014220627768389, \"R2\": -1.654906383755708, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.151284 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7350691203787965, \"RMSE\": 2.9569384317632506, \"R2\": -1.5759385016835008, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.165202 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6987131960417108, \"RMSE\": 2.8893997308323693, \"R2\": -1.5446951110541192, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.179547 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.673610627740774, \"RMSE\": 2.82935583501861, \"R2\": -1.5089937655143242, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.194363 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6410137122925974, \"RMSE\": 2.7701802079251965, \"R2\": -1.484737486096575, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.209602 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6565972573555454, \"RMSE\": 2.7427790467379385, \"R2\": -1.391750010744973, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.225268 }, { \"step\": 460, \"track\": \"Regression\", 
\"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.699464840115161, \"RMSE\": 2.73946740401384, \"R2\": -1.2626191030939884, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.241356 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7224824441896145, \"RMSE\": 2.7219018737730583, \"R2\": -1.182307732575659, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.257864 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7446092142173422, \"RMSE\": 2.70580354422956, \"R2\": -1.1113262021905803, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.274789 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7464998751860934, \"RMSE\": 2.677192702589883, \"R2\": -1.0705208906620065, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.292197 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7535492786865423, \"RMSE\": 2.653885630983747, \"R2\": -1.027170706279252, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.310025 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7201019899937544, \"RMSE\": 2.614359234374483, \"R2\": -1.0141103337708768, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.328271 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6887559504032663, \"RMSE\": 2.5757257291728384, \"R2\": -1.0033760803823184, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.346933 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.701917368353294, \"RMSE\": 2.561424763732869, \"R2\": -0.9592753712060648, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.366058 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7178157166185173, \"RMSE\": 2.551346895968156, \"R2\": -0.9142580419512064, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.38561 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7365901196485038, \"RMSE\": 2.545046385321895, \"R2\": -0.8692105635365064, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.405582 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7465677425181807, \"RMSE\": 2.532051562790666, \"R2\": -0.8368676529707118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.425968 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.731617734826669, \"RMSE\": 2.504226186170861, \"R2\": -0.8251107974736909, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.446783 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6973720107412231, \"RMSE\": 2.47026789197972, \"R2\": -0.8225927549994396, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.468073 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.6698372433333928, \"RMSE\": 2.4400355004771077, \"R2\": -0.81732226470892, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.489788 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6732482399922957, \"RMSE\": 2.425592833263792, \"R2\": -0.7947920429290933, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.511921 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6653913599894004, \"RMSE\": 2.404136439714782, \"R2\": -0.7822814452716051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.53447 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6644612180457288, \"RMSE\": 2.387561393188575, \"R2\": -0.7656652158374817, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.557436 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6556359332933146, \"RMSE\": 2.368497267913513, \"R2\": -0.7532954885990883, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.580851 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6452077788467467, \"RMSE\": 2.348678653798561, \"R2\": -0.7430103139622937, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.604682 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6374623223784903, \"RMSE\": 2.3305035344735936, \"R2\": -0.7320713255917544, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.6289220000000001 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6419505315856449, \"RMSE\": 2.320208013716276, \"R2\": -0.7138439732116804, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.653572 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6490002164922652, \"RMSE\": 2.3126155324510744, \"R2\": -0.6941855677649247, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.6786340000000001 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6474991175923384, \"RMSE\": 2.299197536504521, \"R2\": -0.6816400531907807, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.704114 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6301006788336792, \"RMSE\": 2.2779225390149764, \"R2\": -0.6777843948800273, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.730046 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6221876471839871, \"RMSE\": 2.262378737250057, \"R2\": -0.6690049120995847, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.7563869999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6124120493571743, \"RMSE\": 2.245866476718547, \"R2\": -0.6619276404267609, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.7831509999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.5867001120604314, \"RMSE\": 2.223758235975506, \"R2\": -0.661013659831075, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.810322 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5681359363812415, \"RMSE\": 2.2037391763141216, \"R2\": -0.6587014308970958, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.8379 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"ChickWeights\", \"bind\": { \"input\": \"select\", \"options\": [ \"ChickWeights\", \"TrumpApproval\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": \"Dataset\" } } } ], \"repeat\": { \"row\": [ \"MAE\", \"RMSE\", \"R2\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, \"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Regression/#datasets","title":"Datasets","text":"ChickWeights

    Chick weights over time.

    The stream contains 578 items and 3 features. The goal is to predict the weight of each chick over time, according to the diet the chick is on. The data is ordered by time and then by chick.

    Name      ChickWeights
    Task      Regression

    Samples   578
    Features  3
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/chick-weights.csv

    TrumpApproval

    Donald Trump approval ratings.

    This dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald Trump's approval ratings. It contains 5 features, which are approval ratings collected by 5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of this task is to see if we can reproduce FiveThirtyEight's model.

    Name      TrumpApproval
    Task      Regression

    Samples   1,001
    Features  6
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/trump_approval.csv.gz

    "},{"location":"benchmarks/Regression/#models","title":"Models","text":"Linear Regression

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)

    Linear Regression with l1 regularization

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=1.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)

    Linear Regression with l2 regularization

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=1.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)

    Passive-Aggressive Regressor, mode 1

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  PARegressor (\n    C=1.\n    mode=1\n    eps=0.1\n    learn_intercept=True\n  )\n)

    Passive-Aggressive Regressor, mode 2

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  PARegressor (\n    C=1.\n    mode=2\n    eps=0.1\n    learn_intercept=True\n  )\n)

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNRegressor (\n    n_neighbors=5\n    window_size=100\n    aggregation_method=\"mean\"\n    min_distance_keep=0.\n    distance_func=functools.partial(, p=2)\n  )\n)\n

    Hoeffding Tree

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  HoeffdingTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n)


    Hoeffding Adaptive Tree

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=True\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=42\n  )\n)


    Stochastic Gradient Tree

    SGTRegressor (\n  delta=1e-07\n  grace_period=200\n  init_pred=0.\n  max_depth=inf\n  lambda_value=0.1\n  gamma=1.\n  nominal_attributes=[]\n  feature_quantizer=StaticQuantizer (\n    n_bins=64\n    warm_start=100\n    buckets=None\n  )\n)


    Adaptive Random Forest

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  []\n)


    Adaptive Model Rules

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  AMRules (\n    n_min=200\n    delta=1e-07\n    tau=0.05\n    pred_type=\"adaptive\"\n    pred_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    fading_factor=0.99\n    anomaly_threshold=-0.75\n    m_min=30\n    ordered_rule_set=True\n    min_samples_split=5\n  )\n)


    Streaming Random Patches

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SRPRegressor (\n    model=HoeffdingTreeRegressor (\n      grace_period=50\n      max_depth=inf\n      delta=0.01\n      tau=0.05\n      leaf_prediction=\"adaptive\"\n      leaf_model=LinearRegression (\n        optimizer=SGD (\n          lr=Constant (\n            learning_rate=0.01\n          )\n        )\n        loss=Squared ()\n        l2=0.\n        l1=0.\n        intercept_init=0.\n        intercept_lr=Constant (\n          learning_rate=0.01\n        )\n        clip_gradient=1e+12\n        initializer=Zeros ()\n      )\n      model_selector_decay=0.95\n      nominal_attributes=None\n      splitter=TEBSTSplitter (\n        digits=1\n      )\n      min_samples_split=5\n      binary_split=False\n      max_size=500.\n      memory_estimate_period=1000000\n      stop_mem_management=False\n      remove_poor_attrs=False\n      merit_preprune=True\n    )\n    n_models=10\n    subspace_size=0.6\n    training_method=\"patches\"\n    lam=6\n    drift_detector=ADWIN (\n      delta=1e-05\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    warning_detector=ADWIN (\n      delta=0.0001\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    disable_detector=\"off\"\n    disable_weighted_vote=True\n    drift_detection_criteria=\"error\"\n    aggregation_method=\"mean\"\n    seed=42\n    metric=MAE ()\n  )\n)


    Bagging

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  [HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        
learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n   
 binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  )]\n)


    Exponentially Weighted Average

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  [LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=True\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), KNNRegressor (\n    n_neighbors=5\n    window_size=100\n    aggregation_method=\"mean\"\n    min_distance_keep=0.\n    distance_func=functools.partial(, p=2)\n  ), AMRules (\n    n_min=200\n    delta=1e-07\n    tau=0.05\n    pred_type=\"adaptive\"\n    pred_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    fading_factor=0.99\n    anomaly_threshold=-0.75\n    m_min=30\n    ordered_rule_set=True\n    min_samples_split=5\n  )]\n)\n

    River MLP

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  MLPRegressor (\n    hidden_dims=(5,)\n    activations=(, , )\n    loss=Squared ()\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.001\n      )\n    )\n    seed=42\n  )\n)\n

    [baseline] Mean predictor

    StatisticRegressor (\n  statistic=Mean ()\n)


    "},{"location":"benchmarks/Regression/#environment","title":"Environment","text":"
    Python implementation: CPython\nPython version       : 3.11.5\nIPython version      : 8.15.0\n\nriver       : 0.19.0\nnumpy       : 1.25.2\nscikit-learn: 1.3.0\npandas      : 2.1.0\nscipy       : 1.11.2\n\nCompiler    : GCC 11.4.0\nOS          : Linux\nRelease     : 5.15.0-1041-azure\nMachine     : x86_64\nProcessor   : x86_64\nCPU cores   : 2\nArchitecture: 64bit\n
    "},{"location":"examples/batch-to-online/","title":"From batch to online/stream","text":""},{"location":"examples/batch-to-online/#a-quick-overview-of-batch-learning","title":"A quick overview of batch learning","text":"

    If you've already delved into machine learning, then you shouldn't have any difficulty getting started with incremental learning. If you are somewhat new to machine learning, then do not worry! The point of this notebook is to introduce some simple notions. We'll also start to show how River fits in and explain how to use it.

    The whole point of machine learning is to learn from data. In supervised learning you want to learn how to predict a target \\(y\\) given a set of features \\(X\\). Meanwhile, in unsupervised learning there is no target, and the goal is rather to identify patterns and trends in the features \\(X\\). At this point most people tend to imagine \\(X\\) as a somewhat big table where each row is an observation and each column is a feature, and they would be quite right. Learning from tabular data is part of what's called batch learning, which basically means that all of the data is available to our learning algorithm at once. Multiple libraries have been created to handle the batch learning regime, with one of the most prominent being Python's scikit-learn.

    As a simple example of batch learning, let's say we want to learn to predict whether a woman has breast cancer or not. We'll use the breast cancer dataset available with scikit-learn. We'll learn to map a set of features to a binary decision using a logistic regression. Like many other models based on numerical weights, logistic regression is sensitive to the scale of the features. Rescaling the data so that each feature has mean 0 and variance 1 is generally considered good practice. We can apply the rescaling and fit the logistic regression sequentially in an elegant manner using a Pipeline. To measure the performance of the model we'll evaluate the average ROC AUC score using 5-fold cross-validation.

    from sklearn import datasets\nfrom sklearn import linear_model\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import pipeline\nfrom sklearn import preprocessing\n\n\n# Load the data\ndataset = datasets.load_breast_cancer()\nX, y = dataset.data, dataset.target\n\n# Define the steps of the model\nmodel = pipeline.Pipeline([\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LogisticRegression(solver='lbfgs'))\n])\n\n# Define a deterministic cross-validation procedure\ncv = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)\n\n# Compute the ROC AUC scores\nscorer = metrics.make_scorer(metrics.roc_auc_score)\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and its standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (\u00b1 {scores.std():.3f})')\n
    ROC AUC: 0.975 (\u00b1 0.011)\n

    This might be a lot to take in if you're not accustomed to scikit-learn, but it probably isn't if you are. Batch learning basically boils down to:

    1. Loading (and preprocessing) the data
    2. Fitting a model to the data
    3. Computing the performance of the model on unseen data

    This is pretty standard and is maybe how most people imagine a machine learning pipeline. However, this way of proceeding has certain downsides. First of all, your laptop would crash if the load_breast_cancer function returned a dataset whose size exceeded your available amount of RAM. Sometimes you can use some tricks to get around this. For example, by optimizing the data types and by using sparse representations when applicable you can potentially save precious gigabytes of RAM. However, like many tricks, this only goes so far. If your dataset weighs hundreds of gigabytes then you won't get far without some special hardware. One solution is to do out-of-core learning; that is, to use algorithms that can learn by being presented the data in chunks or mini-batches. If you want to go down this road then take a look at Dask and Spark's MLlib; a rough sketch of the idea follows.
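
    As a minimal illustration of out-of-core learning, here is a sketch using pandas chunks and scikit-learn's partial_fit API; the data.csv file and its target column are hypothetical placeholders.

    import pandas as pd
    from sklearn import linear_model

    # 'data.csv' and its 'target' column are hypothetical placeholders.
    # partial_fit consumes one chunk at a time, so the full dataset
    # never has to fit in RAM.
    model = linear_model.SGDClassifier(loss='log_loss')

    for chunk in pd.read_csv('data.csv', chunksize=10_000):
        X_chunk = chunk.drop(columns='target')
        y_chunk = chunk['target']
        model.partial_fit(X_chunk, y_chunk, classes=[0, 1])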

    Another issue with the batch learning regime is that it can't elegantly learn from new data. Indeed, if new data is made available, then the model has to learn from scratch with a new dataset composed of the old data and the new data. This is particularly annoying in a real situation where you might have new incoming data every week, day, hour, minute, or even second. For example, if you're building a recommendation engine for an e-commerce app, then you're probably retraining your model from scratch every week or so. As your app grows in popularity, so does the dataset you're training on. This will lead to longer and longer training times and might require a hardware upgrade.

    A final downside that isn't very easy to grasp concerns the manner in which features are extracted. Every time you want to train your model you first have to extract features. The catch is that some features might not be accessible at the particular point in time you are at. For example, maybe some attributes in your data warehouse get overwritten over time. In other words, maybe all of the features pertaining to a particular observation are no longer available, whereas they were a week ago. This happens more often than not in real scenarios, and unless you have a sophisticated data engineering pipeline you will encounter these issues at some point.

    "},{"location":"examples/batch-to-online/#a-hands-on-introduction-to-incremental-learning","title":"A hands-on introduction to incremental learning","text":"

    Incremental learning is also often called online learning or stream learning, but if you google online learning a lot of the results will point to educational websites. Hence, the terms \"incremental learning\" and \"stream learning\" (from which River derives its name) are preferred. The point of incremental learning is to fit a model to a stream of data. In other words, the data isn't available in its entirety, but rather the observations are provided one by one. As an example, let's stream through the dataset used previously.

    for xi, yi in zip(X, y):\n    # This is where the model learns\n    pass\n

    In this case we're iterating over a dataset that is already in memory, but we could just as well stream from a CSV file, a Kafka stream, an SQL query, etc. As a sketch, reading from a CSV file with River could look like the following; the data.csv file and its target column are hypothetical placeholders.
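
    from river import stream

    # 'data.csv' and its 'target' column are hypothetical placeholders;
    # stream.iter_csv yields one (features, target) pair at a time, so
    # the file never has to be loaded into memory all at once
    for xi, yi in stream.iter_csv('data.csv', target='target'):
        pass  # a model would learn from (xi, yi) here

    Coming back to the in-memory loop above: if we look at xi, we can notice that it is a numpy.ndarray.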

    xi\n
    array([7.760e+00, 2.454e+01, 4.792e+01, 1.810e+02, 5.263e-02, 4.362e-02,\n       0.000e+00, 0.000e+00, 1.587e-01, 5.884e-02, 3.857e-01, 1.428e+00,\n       2.548e+00, 1.915e+01, 7.189e-03, 4.660e-03, 0.000e+00, 0.000e+00,\n       2.676e-02, 2.783e-03, 9.456e+00, 3.037e+01, 5.916e+01, 2.686e+02,\n       8.996e-02, 6.444e-02, 0.000e+00, 0.000e+00, 2.871e-01, 7.039e-02])\n

    River by design works with dicts. We believe that dicts are more enjoyable to program with than numpy.ndarrays, at least when single observations are concerned. dicts bring the added benefit that each feature can be accessed by name rather than by position.

    for xi, yi in zip(X, y):\n    xi = dict(zip(dataset.feature_names, xi))\n    pass\n\nxi\n
    {'mean radius': 7.76,\n 'mean texture': 24.54,\n 'mean perimeter': 47.92,\n 'mean area': 181.0,\n 'mean smoothness': 0.05263,\n 'mean compactness': 0.04362,\n 'mean concavity': 0.0,\n 'mean concave points': 0.0,\n 'mean symmetry': 0.1587,\n 'mean fractal dimension': 0.05884,\n 'radius error': 0.3857,\n 'texture error': 1.428,\n 'perimeter error': 2.548,\n 'area error': 19.15,\n 'smoothness error': 0.007189,\n 'compactness error': 0.00466,\n 'concavity error': 0.0,\n 'concave points error': 0.0,\n 'symmetry error': 0.02676,\n 'fractal dimension error': 0.002783,\n 'worst radius': 9.456,\n 'worst texture': 30.37,\n 'worst perimeter': 59.16,\n 'worst area': 268.6,\n 'worst smoothness': 0.08996,\n 'worst compactness': 0.06444,\n 'worst concavity': 0.0,\n 'worst concave points': 0.0,\n 'worst symmetry': 0.2871,\n 'worst fractal dimension': 0.07039}\n

    Conveniently, River's stream module has an iter_sklearn_dataset method that we can use instead.

    from river import stream\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    pass\n

    The simple fact that we are getting the data as a stream means that we can't do a lot of things the same way as in a batch setting. For example, let's say we want to scale the data so that it has mean 0 and variance 1, as we did earlier. To do so we simply have to subtract the mean of each feature from each value and then divide the result by the standard deviation of the feature. The problem is that we can't possibly know the values of the mean and the standard deviation before actually going through all the data! One way to proceed would be to do a first pass over the data to compute the necessary values and then scale the values during a second pass, as sketched below. The problem is that this defeats our purpose, which is to learn by only looking at the data once. Although this might seem rather restrictive, it reaps sizable benefits down the road.
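
    Here is what that two-pass approach looks like, reusing the X array from the scikit-learn example above; it works, but it requires having all of the data at hand:

    import numpy as np

    # First pass: compute the statistics over the entire dataset
    means = X.mean(axis=0)
    stds = X.std(axis=0)

    # Second pass: scale each observation with the precomputed statistics
    for xi in X:
        xi_scaled = (xi - means) / stds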

    The way we do feature scaling in River involves computing running statistics (also known as moving statistics). The idea is that we use a data structure that estimates the mean and updates itself when it is provided with a value. The same goes for the variance (and thus the standard deviation). For example, if we denote \\(\\mu_t\\) the mean and \\(n_t\\) the count at any moment \\(t\\), then updating the mean can be done as so:

    \\[ \\begin{cases} n_{t+1} = n_t + 1 \\\\ \\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\end{cases} \\]

    Likewise, the running variance can be computed as so:

    \\[ \\begin{cases} n_{t+1} = n_t + 1 \\\\ \\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\\\ s_{t+1} = s_t + (x - \\mu_t) \\times (x - \\mu_{t+1}) \\\\ \\sigma_{t+1} = \\frac{s_{t+1}}{n_{t+1}} \\end{cases} \\]

    where \\(s_t\\) is a running sum of squares and \\(\\sigma_t\\) is the running variance at time \\(t\\). This might seem a tad more involved than the batch algorithms you learn in school, but it is rather elegant. Implementing this in Python is not too difficult. For example let's compute the running mean and variance of the 'mean area' variable.

    n, mean, sum_of_squares, variance = 0, 0, 0, 0\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    n += 1\n    old_mean = mean\n    mean += (xi['mean area'] - mean) / n\n    sum_of_squares += (xi['mean area'] - old_mean) * (xi['mean area'] - mean)\n    variance = sum_of_squares / n\n\nprint(f'Running mean: {mean:.3f}')\nprint(f'Running variance: {variance:.3f}')\n
    Running mean: 654.889\nRunning variance: 123625.903\n

    Let's compare this with numpy. But remember, numpy requires access to \"all\" the data.

    import numpy as np\n\ni = list(dataset.feature_names).index('mean area')\nprint(f'True mean: {np.mean(X[:, i]):.3f}')\nprint(f'True variance: {np.var(X[:, i]):.3f}')\n
    True mean: 654.889\nTrue variance: 123625.903\n

    The results seem to be exactly the same! The twist is that the running statistics won't be very accurate for the first few observations. In general though this doesn't matter too much. Some would even go as far as to say that this discrepancy is beneficial and acts as some sort of regularization...
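
    Incidentally, these running statistics don't have to be implemented by hand: River's stats module provides them. A small sketch, reusing the stream and datasets imports from above:

    from river import stats

    mean = stats.Mean()
    var = stats.Var(ddof=0)  # ddof=0 matches the population variance computed above

    for xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):
        mean.update(xi['mean area'])
        var.update(xi['mean area'])

    print(f'Running mean: {mean.get():.3f}')
    print(f'Running variance: {var.get():.3f}')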

    Now the idea is that we can compute the running statistics of each feature and scale them as they come along. The way to do this with River is to use the StandardScaler class from the preprocessing module, as so:

    from river import preprocessing\n\nscaler = preprocessing.StandardScaler()\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    scaler = scaler.learn_one(xi)\n

    Now that we are scaling the data, we can start doing some actual machine learning. We're going to implement online logistic regression. Because all the data isn't available at once, we are obliged to do what is called stochastic gradient descent, which is a popular research topic and has a lot of variants. SGD is commonly used to train neural networks. The idea is that at each step we compute the loss between the prediction and the ground truth. We then calculate the gradient, which is simply a set of derivatives with respect to each weight of the model. Once we have obtained the gradient, we can update the weights by moving them in the opposite direction of the gradient. The amount by which the weights are moved typically depends on a learning rate, which is typically set by the user. Different optimizers have different ways of managing the weight update, and some handle the learning rate implicitly. Online logistic regression can be done in River with the LogisticRegression class from the linear_model module. We'll be using plain and simple SGD via the SGD optimizer from the optim module. During training we'll store the truth and the predicted probabilities, so that we can measure the ROC AUC afterwards.
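
    To make the gradient step concrete, here is a simplified sketch of a single SGD update for logistic regression over dict-based features. It ignores the intercept and gradient clipping, which River handles internally:

    import math

    def sgd_step(weights, x, y, lr=0.01):
        # Predict with the current weights; features and weights are dicts
        score = sum(weights.get(i, 0.0) * v for i, v in x.items())
        p = 1.0 / (1.0 + math.exp(-score))
        # For the log loss, the derivative with respect to weight i is
        # (p - y) * x[i], so each weight moves against its gradient
        for i, v in x.items():
            weights[i] = weights.get(i, 0.0) - lr * (p - y) * v
        return weights

    With that intuition in place, here is the actual River code: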

    from river import linear_model\nfrom river import optim\n\nscaler = preprocessing.StandardScaler()\noptimizer = optim.SGD(lr=0.01)\nlog_reg = linear_model.LogisticRegression(optimizer)\n\ny_true = []\ny_pred = []\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer(), shuffle=True, seed=42):\n\n    # Scale the features\n    xi_scaled = scaler.learn_one(xi).transform_one(xi)\n\n    # Test the current model on the new \"unobserved\" sample\n    yi_pred = log_reg.predict_proba_one(xi_scaled)\n    # Train the model with the new sample\n    log_reg.learn_one(xi_scaled, yi)\n\n    # Store the truth and the prediction\n    y_true.append(yi)\n    y_pred.append(yi_pred[True])\n\nprint(f'ROC AUC: {metrics.roc_auc_score(y_true, y_pred):.3f}')\n
    ROC AUC: 0.990\n

    The ROC AUC is significantly better than the one obtained from the cross-validation of scikit-learn's logistic regression. However, to make things really comparable it would be nice to compare with the same cross-validation procedure. River has a compat module that contains utilities for making River compatible with other Python libraries. Here we'll use the convert_river_to_sklearn function to wrap our model as a scikit-learn estimator. We'll also be using Pipeline to encapsulate the logic of the StandardScaler and the LogisticRegression in one single object.

    from river import compat\nfrom river import compose\n\n# We define a Pipeline, exactly like we did earlier for sklearn\nmodel = compose.Pipeline(\n    ('scale', preprocessing.StandardScaler()),\n    ('log_reg', linear_model.LogisticRegression())\n)\n\n# We make the Pipeline compatible with sklearn\nmodel = compat.convert_river_to_sklearn(model)\n\n# We compute the CV scores using the same CV scheme and the same scoring\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and its standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (\u00b1 {scores.std():.3f})')\n
    ROC AUC: 0.964 (\u00b1 0.016)\n

    This time the ROC AUC score is lower, which is what we would expect. Indeed, online learning isn't as accurate as batch learning. However, it all depends on what you're interested in. If you're only interested in predicting the next observation then the online learning regime would be better. That's why it's a bit hard to compare both approaches: they're each suited to different scenarios.
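
    By the way, the manual loop we wrote earlier (predict on a sample, then learn from it) is known as progressive validation, and River provides a shortcut for it. A sketch using the same model:

    from sklearn import datasets
    from river import compose, evaluate, linear_model, preprocessing, stream
    from river import metrics as river_metrics

    model = compose.Pipeline(
        ('scale', preprocessing.StandardScaler()),
        ('log_reg', linear_model.LogisticRegression())
    )

    # Each sample is used for testing first, then for learning
    evaluate.progressive_val_score(
        stream.iter_sklearn_dataset(datasets.load_breast_cancer(), shuffle=True, seed=42),
        model,
        river_metrics.ROCAUC()
    )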

    "},{"location":"examples/batch-to-online/#going-further","title":"Going further","text":"

    Here are a few resources if you want to do some reading:

    • Online learning -- Wikipedia
    • What is online machine learning? -- Max Pagels
    • Introduction to Online Learning -- USC course
    • Online Methods in Machine Learning -- MIT course
    • Online Learning: A Comprehensive Survey
    • Streaming 101: The world beyond batch
    • Machine learning for data streams
    • Data Stream Mining: A Practical Approach
    "},{"location":"examples/bike-sharing-forecasting/","title":"Bike-sharing forecasting","text":"

    In this tutorial we're going to forecast the number of bikes in 5 bike stations from the city of Toulouse. We'll do so by building a simple model step by step. The dataset contains 182,470 observations. Let's first take a peek at the data.

    from pprint import pprint\nfrom river import datasets\n\ndataset = datasets.Bikes()\n\nfor x, y in dataset:\n    pprint(x)\n    print(f'Number of available bikes: {y}')\n    break\n
    {'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'pressure': 1017.0,\n 'station': 'metro-canal-du-midi',\n 'temperature': 6.54,\n 'wind': 9.3}\nNumber of available bikes: 1\n

    Let's start by using a simple linear regression on the numeric features. We can select the numeric features and discard the rest of the features using a Select. Linear regression is very likely to go haywire if we don't scale the data, so we'll use a StandardScaler to do just that. We'll evaluate the model by measuring the mean absolute error. Finally we'll print the score every 20,000 observations.

    from river import compose\nfrom river import linear_model\nfrom river import metrics\nfrom river import evaluate\nfrom river import preprocessing\nfrom river import optim\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression(optimizer=optim.SGD(0.001))\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric, print_every=20_000)\n
    [20,000] MAE: 4.912763\n[40,000] MAE: 5.333578\n[60,000] MAE: 5.330969\n[80,000] MAE: 5.392334\n[100,000] MAE: 5.423078\n[120,000] MAE: 5.541239\n[140,000] MAE: 5.613038\n[160,000] MAE: 5.622441\n[180,000] MAE: 5.567836\n[182,470] MAE: 5.563905\n\nMAE: 5.563905\n

    The model doesn't seem to be doing that well, but then again we didn't provide a lot of features. Generally, a good idea for this kind of problem is to look at an average of the previous values. For example, for each station we can look at the average number of bikes per hour. To do so we first have to extract the hour from the moment field. We can then use a TargetAgg to aggregate the values of the target.

    from river import feature_extraction\nfrom river import stats\n\ndef get_hour(x):\n    x['hour'] = x['moment'].hour\n    return x\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel += (\n    get_hour |\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression(optimizer=optim.SGD(0.001))\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric, print_every=20_000)\n
    [20,000] MAE: 3.720766\n[40,000] MAE: 3.829739\n[60,000] MAE: 3.844905\n[80,000] MAE: 3.910137\n[100,000] MAE: 3.888553\n[120,000] MAE: 3.923644\n[140,000] MAE: 3.980882\n[160,000] MAE: 3.949972\n[180,000] MAE: 3.934489\n[182,470] MAE: 3.933442\n\nMAE: 3.933442\n

    By adding a single feature, we've managed to significantly reduce the mean absolute error. At this point you might think that the model is getting slightly complex, and is difficult to understand and test. Pipelines have the advantage of being terse, but they aren't always easy to debug. Thankfully River has some ways to relieve the pain.

    The first thing we can do is to visualize the pipeline, to get an idea of how the data flows through it.

    model\n
    Select ( clouds humidity pressure temperature wind )
    get_hour: def get_hour(x): x['hour'] = x['moment'].hour return x
    y_mean_by_station_and_hour: TargetAgg ( by=['station', 'hour'] how=Mean () target_name=\"y\" )
    StandardScaler ( with_std=True )
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.001 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    We can also use the debug_one method to see what happens to one particular instance. Let's train the model on the first 10,000 observations and then call debug_one on an observation. To do this, we will turn the Bikes dataset into a Python generator with the iter() function. The Pythonic way to read the first 10,000 elements of a generator is to use itertools.islice.

    import itertools\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel += (\n    get_hour |\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nfor x, y in itertools.islice(dataset, 10000):\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n\nx, y = next(iter(dataset))\nprint(model.debug_one(x))\n
    0. Input\n--------\nclouds: 75 (int)\ndescription: light rain (str)\nhumidity: 81 (int)\nmoment: 2016-04-01 00:00:07 (datetime)\npressure: 1,017.00000 (float)\nstation: metro-canal-du-midi (str)\ntemperature: 6.54000 (float)\nwind: 9.30000 (float)\n\n1. Transformer union\n--------------------\n    1.0 Select\n    ----------\n    clouds: 75 (int)\n    humidity: 81 (int)\n    pressure: 1,017.00000 (float)\n    temperature: 6.54000 (float)\n    wind: 9.30000 (float)\n\n    1.1 get_hour | y_mean_by_station_and_hour\n    -----------------------------------------\n    y_mean_by_station_and_hour: 4.43243 (float)\n\nclouds: 75 (int)\nhumidity: 81 (int)\npressure: 1,017.00000 (float)\ntemperature: 6.54000 (float)\nwind: 9.30000 (float)\ny_mean_by_station_and_hour: 4.43243 (float)\n\n2. StandardScaler\n-----------------\nclouds: 0.47566 (float)\nhumidity: 0.42247 (float)\npressure: 1.05314 (float)\ntemperature: -1.22098 (float)\nwind: 2.21104 (float)\ny_mean_by_station_and_hour: -0.59098 (float)\n\n3. LinearRegression\n-------------------\nName      Value     Weight    Contribution  \nIntercept  1.00000   6.58252       6.58252  \npressure   1.05314   3.78529       3.98646  \nhumidity   0.42247   1.44921       0.61225  \ny_mean_by_station_and_hour -0.59098   0.54167      -0.32011  \n clouds    0.47566  -1.92255      -0.91448  \n   wind    2.21104  -0.77720      -1.71843  \ntemperature -1.22098   2.47030      -3.01619\n\nPrediction: 5.21201\n

    The debug_one method shows what happens to an input set of features, step by step.

    And now comes the catch. Up until now we've been using the progressive_val_score method from the evaluate module. What it does is that it sequentially predicts the output of an observation and updates the model immediately afterwards. This way of proceeding is often used for evaluating online learning models. But in some cases it is the wrong approach.
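
    In essence, progressive validation boils down to the following loop -- a simplification of what the evaluate module actually does:

    def progressive_val(dataset, model, metric):\n    for x, y in dataset:\n        y_pred = model.predict_one(x)  # predict before seeing the label\n        metric.update(y, y_pred)       # score the out-of-fold prediction\n        model.learn_one(x, y)          # only then learn from the label\n    return metric\n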

    When evaluating a machine learning model, the goal is to simulate production conditions in order to get a trustworthy assessment of the performance of the model. In our case, we typically want to forecast the number of bikes available in a station, say, 30 minutes ahead. Then, once the 30 minutes have passed, the true number of available bikes will be available and we will be able to update the model using the features available 30 minutes ago.

    What we really want is to evaluate the model by forecasting 30 minutes ahead and only updating the model once the true values are available. This can be done using the moment and delay parameters in the progressive_val_score method. The idea is that each observation in the stream of the data is shown twice to the model: once for making a prediction, and once for updating the model when the true value is revealed. The moment parameter determines which variable should be used as a timestamp, while the delay parameter controls the duration to wait before revealing the true values to the model.
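
    Conceptually, delayed progressive validation keeps each prediction in a queue until its timestamp plus the delay has elapsed, and only then reveals the ground truth to the model. Here is a rough sketch of the idea, assuming the stream is ordered by time:

    def delayed_progressive_val(dataset, model, metric, moment, delay):\n    pending = []  # (reveal time, features, true label, prediction)\n    for x, y in dataset:\n        now = x[moment]\n        # First, reveal the ground truths whose delay has elapsed\n        while pending and pending[0][0] <= now:\n            _, x_old, y_old, y_pred_old = pending.pop(0)\n            metric.update(y_old, y_pred_old)\n            model.learn_one(x_old, y_old)\n        # Then make a forecast for the current observation\n        pending.append((now + delay, x, y, model.predict_one(x)))\n    return metric\n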

    import datetime as dt\n\nevaluate.progressive_val_score(\n    dataset=dataset,\n    model=model.clone(),\n    metric=metrics.MAE(),\n    moment='moment',\n    delay=dt.timedelta(minutes=30),\n    print_every=20_000\n)\n
    [20,000] MAE: 20.198137\n[40,000] MAE: 12.199763\n[60,000] MAE: 9.468279\n[80,000] MAE: 8.126625\n[100,000] MAE: 7.273133\n[120,000] MAE: 6.735469\n[140,000] MAE: 6.376704\n[160,000] MAE: 6.06156\n[180,000] MAE: 5.806744\n[182,470] MAE: 5.780772\n\nMAE: 5.780772\n

    The performance is a bit worse, which is to be expected. Indeed, the task is more difficult: the model is only shown the ground truth 30 minutes after making a prediction.

    The takeaway of this notebook is that the progressive_val_score method can be used to simulate a production scenario, and is thus extremely valuable.

    "},{"location":"examples/building-a-simple-nowcasting-model/","title":"Building a simple nowcasting model","text":"

    Nowcasting is a special case of forecasting. It simply consists of predicting the next value in a time series.

    We'll be using the international airline passenger data available from here. This particular dataset is included with River in the datasets module.

    from river import datasets\n\nfor x, y in datasets.AirlinePassengers():\n    print(x, y)\n    break\n
    {'month': datetime.datetime(1949, 1, 1, 0, 0)} 112\n

    The data is as simple as can be: it consists of a sequence of months and values representing the total number of international airline passengers per month. Our goal is going to be to predict the number of passengers for the next month at each step. Notice that because the dataset is small -- which is usually the case for time series -- we could just fit a model from scratch each month. However for the sake of example we're going to train a single model online. Although the overall performance might be weaker, training a time series model online has the benefit of being scalable if, say, you have thousands of time series to manage.

    We'll start with a very simple model where the only feature will be the ordinal date of each month. This should be able to capture some of the underlying trend.

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}\n\n\nmodel = compose.Pipeline(\n    ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n

    We'll write down a function to evaluate the model. This will go through each observation in the dataset and update the model as it goes on. The prior predictions will be stored along with the true values and will be plotted together.

    from river import metrics\nfrom river import utils\nimport matplotlib.pyplot as plt\n\n\ndef evaluate_model(model): \n\n    metric = utils.Rolling(metrics.MAE(), 12)\n\n    dates = []\n    y_trues = []\n    y_preds = []\n\n    for x, y in datasets.AirlinePassengers():\n\n        # Obtain the prior prediction and update the model in one go\n        y_pred = model.predict_one(x)\n        model.learn_one(x, y)\n\n        # Update the error metric\n        metric.update(y, y_pred)\n\n        # Store the true value and the prediction\n        dates.append(x['month'])\n        y_trues.append(y)\n        y_preds.append(y_pred)\n\n    # Plot the results\n    fig, ax = plt.subplots(figsize=(10, 6))\n    ax.grid(alpha=0.75)\n    ax.plot(dates, y_trues, lw=3, color='#2ecc71', alpha=0.8, label='Ground truth')\n    ax.plot(dates, y_preds, lw=3, color='#e74c3c', alpha=0.8, label='Prediction')\n    ax.legend()\n    ax.set_title(metric)\n

    Let's evaluate our first model.

    evaluate_model(model)\n

    The model has captured a trend but not the right one. Indeed it thinks the trend is linear whereas we can visually see that the growth of the data increases with time. In other words the second derivative of the series is positive. This is a well-known problem in time series forecasting and there are thus many ways to handle it; for example by using a Box-Cox transform. However we are going to do something a bit different, and instead linearly detrend the series using a TargetStandardScaler.

    from river import stats\n\n\nmodel = compose.Pipeline(\n    ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(intercept_lr=0)),\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    Now let's try and capture the monthly trend by one-hot encoding the month name.

    import calendar\n\n\ndef get_month(x):\n    return {\n        calendar.month_name[month]: month == x['month'].month\n        for month in range(1, 13)\n    }\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month', compose.FuncTransformer(get_month)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(intercept_lr=0))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    This seems pretty decent. We can take a look at the weights of the linear regression to get an idea of the importance of each feature.

    model.regressor['lin_reg'].weights\n
    {'January': -0.13808091575141299,\n 'February': -0.18716063793638954,\n 'March': -0.026469206216021102,\n 'April': -0.03500685108350436,\n 'May': -0.013638742192777328,\n 'June': 0.16194267303548826,\n 'July': 0.31995865445067634,\n 'August': 0.2810396556938982,\n 'September': 0.03834350518076595,\n 'October': -0.11655850082390988,\n 'November': -0.2663497734491209,\n 'December': -0.15396048501165746,\n 'ordinal_date': 1.0234863735122575}\n

    As could be expected the months of July and August have the highest weights because these are the months where people typically go on holiday abroad. The month of December has a low weight because this is a month of festivities in most of the Western world where people usually stay at home.

    Our model seems to understand which months are important, but it fails to see that the importance of each month grows multiplicatively as the years go on. In other words our model is too shy. We can fix this by increasing the learning rate of the LinearRegression's optimizer.

    from river import optim\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month', compose.FuncTransformer(get_month)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03)\n    ))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    This is starting to look good! Naturally in production we would tune the learning rate, ideally in real-time.

    Before finishing, we're going to introduce a cool feature extraction trick based on radial basis function kernels. The one-hot encoding we did on the month is a good idea, but if you think about it, it is a bit rigid. Indeed the value of each feature is going to be 0 or 1, depending on the month of each observation. We're basically saying that the month of September is as distant to the month of August as it is to the month of March. Of course this isn't true, and it would be nice if our features reflected this. To do so we can simply calculate the distance between the month of each observation and all the months in the calendar. Instead of simply computing the distance linearly, we're going to use a so-called Gaussian radial basis function kernel. This is a bit of a mouthful but for us it boils down to a simple formula, which is:

    \[d(i, j) = \exp\left(-\frac{(i - j)^2}{2\sigma^2}\right)\]

    Intuitively this computes a similarity between two months -- denoted by \(i\) and \(j\) -- which decreases the further apart they are from each other. The \(\sigma\) parameter can be seen as a hyperparameter that can be tuned -- in the following snippet we'll simply ignore it. The thing to take away is that this results in smoother predictions than when using a one-hot encoding scheme, which is often a desirable property. You can also see this trick in action in this nice presentation.

    import math\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month_distances', compose.FuncTransformer(get_month_distances)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03)\n    ))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    We've managed to get a good looking prediction curve with a reasonably simple model. What's more our model has the advantage of being interpretable and easy to debug. There surely are more rocks to squeeze (e.g. tune the hyperparameters, use an ensemble model, etc.) but we'll leave that as an exercise to the reader.

    As a finishing touch we'll rewrite our pipeline using the | operator, which is called a \"pipe\".

    extract_features = compose.TransformerUnion(get_ordinal_date, get_month_distances)\n\nscale = preprocessing.StandardScaler()\n\nlearn = linear_model.LinearRegression(\n    intercept_lr=0,\n    optimizer=optim.SGD(0.03)\n)\n\nmodel = extract_features | scale | learn\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    model\n
    TargetStandardScaler ( regressor=Pipeline ( steps=OrderedDict([('TransformerUnion', TransformerUnion ( FuncTransformer ( func=\"get_ordinal_date\" ), FuncTransformer ( func=\"get_month_distances\" ) )), ('StandardScaler', StandardScaler ( with_std=True )), ('LinearRegression', LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.03 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0 ) clip_gradient=1e+12 initializer=Zeros () ))]) ) )
    "},{"location":"examples/content-personalization/","title":"Content personalization","text":""},{"location":"examples/content-personalization/#without-context","title":"Without context","text":"

    This example takes inspiration from Vowpal Wabbit's excellent tutorial.

    Content personalization is about taking into account user preferences. It's a special case of recommender systems. Ideally, side-information should be taken into account in addition to the user. But we'll start with something simpler. We'll assume that each user has stable preferences that are independent of the context. We capture this by implementing a \"reward\" function.

    def get_reward(user, item, context):\n\n    USER_LIKED_ARTICLE = 1\n    USER_DISLIKED_ARTICLE = 0\n\n    if user == 'Tom':\n        if item in {'music', 'politics'}:\n            return USER_LIKED_ARTICLE\n        return USER_DISLIKED_ARTICLE\n\n    if user == 'Anna':\n        if item in {'sports', 'politics'}:\n            return USER_LIKED_ARTICLE\n        return USER_DISLIKED_ARTICLE\n\nget_reward('Tom', 'politics', {'time_of_day': 'morning'})\n
    1\n

    Measuring the performance of a recommendation is not straightforward, mostly because of the interactive aspect of recommender systems. In a real situation, recommendations are presented to a user, and the user gives feedback indicating whether they like what they have been recommended or not. This feedback loop can't be captured entirely by a historical dataset. Some kind of simulator is required to generate recommendations and capture feedback. We already have a reward function. Now let's implement a simulation function.

    import random\nimport matplotlib.pyplot as plt\n\ndef plot_ctr(ctr):\n    plt.plot(range(1, len(ctr) + 1), ctr)\n    plt.xlabel('n_iterations', fontsize=14)\n    plt.ylabel('CTR', fontsize=14)\n    plt.ylim([0, 1])\n    plt.title(f'final CTR: {ctr[-1]:.2%}', fontsize=14)\n    plt.grid()\n\nusers = ['Tom', 'Anna']\ntimes_of_day = ['morning', 'afternoon']\nitems = {'politics', 'sports', 'music', 'food', 'finance', 'health', 'camping'}\n\ndef simulate(n, reward_func, model, seed):\n\n    rng = random.Random(seed)\n    n_clicks = 0\n    ctr = []  # click-through rate along time\n\n    for i in range(n):\n\n        # Generate a context at random; for now the model doesn't see it\n        user = rng.choice(users)\n        context = {\n            'time_of_day': rng.choice(times_of_day)\n        }\n\n        # Make a single recommendation\n        item = model.rank(user, items=items)[0]\n\n        # Measure the reward\n        clicked = reward_func(user, item, context)\n        n_clicks += clicked\n        ctr.append(n_clicks / (i + 1))\n\n        # Update the model\n        model.learn_one(user, item, y=clicked)\n\n    plot_ctr(ctr)\n

    This simulation function does quite a few things. It can be seen as a simple reinforcement learning simulation. It samples a user along with a context, and then asks the model to provide a single recommendation. The user then gives feedback as to whether they liked the recommendation or not. Crucially, the user doesn't tell us what item they would have liked. If that were the case, we could model this as a multi-class classification problem.

    The rank method generates the recommendations: the items are each scored by the model, and are then ranked from the most preferred to the least preferred. Here the most preferred item is the one which gets recommended. But you could imagine all sorts of alternative ways to proceed.

    We can first evaluate a recommender which acts completely at random. It assigns a random preference to each item, regardless of the user.

    from river import reco\n\nmodel = reco.RandomNormal(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can see that the click-through rate (CTR) oscillates around 28.74%. In fact, this model is expected to be correct 100 * (2 / 7)% = 28.57% of the time. Indeed, each user likes two items, and there are seven items in total.

    Let's now use the Baseline recommender. This one models each preference as the following sum:

    \\[preference = \\bar{y} + b_{u} + b_{i}\\]

    where

    • \\(\\bar{y}\\) is the average CTR overall
    • \\(b_{u}\\) is the average CTR per user minus \\(\\bar{y}\\) -- it's therefore called a bias
    • \\(b_{i}\\) is the average CTR per item minus \\(\\bar{y}\\)

    This model is considered to be a baseline because it doesn't actually learn what items are preferred by each user. Instead it models each user and item separately. We shouldn't expect it to be a strong model. It should however do better than the random model used above.
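
    To build some intuition, the biases can be thought of as running averages. The following sketch is only a mental model -- River's Baseline actually fits \(\bar{y}\), \(b_{u}\) and \(b_{i}\) with SGD:

    from collections import defaultdict\n\nclass BaselineSketch:\n\n    def __init__(self):\n        self.n, self.total = 0, 0.0\n        self.users = defaultdict(lambda: [0, 0.0])  # per-user (count, sum)\n        self.items = defaultdict(lambda: [0, 0.0])  # per-item (count, sum)\n\n    def predict(self, user, item):\n        if self.n == 0:\n            return 0.0\n        y_bar = self.total / self.n\n        n_u, s_u = self.users[user]\n        n_i, s_i = self.items[item]\n        b_u = s_u / n_u - y_bar if n_u else 0.0\n        b_i = s_i / n_i - y_bar if n_i else 0.0\n        return y_bar + b_u + b_i\n\n    def learn(self, user, item, y):\n        self.n += 1\n        self.total += y\n        for stats, key in ((self.users, user), (self.items, item)):\n            stats[key][0] += 1\n            stats[key][1] += y\n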

    model = reco.Baseline(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    This baseline model seems perfect, which is surprising. The reason why it works so well is that both users have in common that they like politics. The model therefore learns that 'politics' is a good item to recommend.

    model.i_biases\n
    defaultdict(Zeros (),\n            {'politics': 0.06389451550325113,\n             'music': -0.04041254194187752,\n             'finance': -0.040319730234734,\n             'camping': -0.03581829597317823,\n             'food': -0.037778771188204816,\n             'health': -0.04029646665611086,\n             'sports': -0.03661678982763635})\n

    The model is not as performant if we use a reward function where the two users have no preferences in common.

    simulate(\n    5_000,\n    reward_func=lambda user, item, context: (\n        item in {'music', 'politics'} if user == \"Tom\" else\n        item in {'food', 'sports'}\n    ),\n    model=model,\n    seed=42\n)\n

    A good recommender model should at the very least understand what kind of items each user prefers. One of the simplest and yet most performant ways to do this is the SGD method Simon Funk developed for the Netflix challenge and wrote about here. It models each user and each item as latent vectors. The dot product of these two vectors is the expected preference of the user for the item.
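
    The gist of the method fits in a few lines. Below is a minimal sketch of Funk's SGD updates under the squared loss, assuming a latent dimension k and a fixed learning rate; River's FunkMF takes care of initialization, regularization and the choice of optimizer:

    import random\n\nk, lr = 10, 0.1\nrng = random.Random(42)\nu_latents, i_latents = {}, {}\n\ndef predict(user, item):\n    u = u_latents.setdefault(user, [rng.gauss(0, 0.1) for _ in range(k)])\n    i = i_latents.setdefault(item, [rng.gauss(0, 0.1) for _ in range(k)])\n    # The expected preference is the dot product of the two latent vectors\n    return sum(uf * jf for uf, jf in zip(u, i))\n\ndef learn(user, item, y):\n    err = y - predict(user, item)  # predict initializes the latents if needed\n    u, i = u_latents[user], i_latents[item]\n    for f in range(k):\n        u[f], i[f] = u[f] + lr * err * i[f], i[f] + lr * err * u[f]\n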

    model = reco.FunkMF(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can see that this model learns what items each user enjoys very well. Of course, there are some caveats. In our simulation, we ask the model to recommend the item most likely to be preferred for each user. Indeed, we rank all the items and pick the item at the top of the list. We do this many times for only two users.

    This is of course not realistic. Users will get fed up with recommendations if they're always shown the same item. It's important to include diversity into recommendations, and to let the model explore other options instead of always focusing on the item with the highest score. This is where evaluating recommender systems gets tricky: the reward function itself is difficult to model.

    We will keep ignoring these caveats in this notebook. Instead we will focus on a different concern: making recommendations when context is involved.

    "},{"location":"examples/content-personalization/#with-context","title":"With context","text":"

    We'll add some context by making it so that user preferences change depending on the time of the day. Very simply, preferences might change from morning to afternoon. This is captured by the following reward function.

    times_of_day = ['morning', 'afternoon']\n\ndef get_reward(user, item, context):\n    if user == 'Tom':\n        if context['time_of_day'] == 'morning':\n            return item == 'politics'\n        if context['time_of_day'] == 'afternoon':\n            return item == 'music'\n    if user == 'Anna':\n        if context['time_of_day'] == 'morning':\n            return item == 'sports'\n        if context['time_of_day'] == 'afternoon':\n            return item == 'politics'\n

    We also have to update our simulation function so that the model makes use of the context, both when recommending items and when learning.

    def simulate(n, reward_func, model, seed):\n\n    rng = random.Random(seed)\n    n_clicks = 0\n    ctr = []\n\n    for i in range(n):\n\n        user = rng.choice(users)\n\n        # New: pass a context\n        context = {'time_of_day': rng.choice(times_of_day)}\n        item = model.rank(user, items, context)[0]\n\n        clicked = reward_func(user, item, context)\n        n_clicks += clicked\n        ctr.append(n_clicks / (i + 1))\n\n        # New: pass a context\n        model.learn_one(user, item, clicked, context)\n\n    plot_ctr(ctr)\n

    Not all models are capable of taking the context into account. For instance, the FunkMF model only models users and items. It completely ignores the context, even when we provide one. All recommender models inherit from the base Recommender class. They also have an is_contextual property which indicates whether or not they are able to handle context:

    model = reco.FunkMF(seed=10)\nmodel.is_contextual\n
    False\n

    Let's see how well it performs.

    simulate(5_000, get_reward, model, seed=42)\n

    The performance has roughly been halved. This is most likely because there are now two times of day, and if the model has learnt preferences for one time of the day, then it's expected to be wrong half of the time.

    Before delving into recsys models that can handle context, a simple hack is to notice that we can append the time of day to the user. This effectively results in new users which our model can distinguish between. We could apply this trick during the simulation, but we can also override the behavior of the learn_one and rank methods of our model.

    class FunkMFWithHack(reco.FunkMF):\n\n    def learn_one(self, user, item, reward, context):\n        user = f\"{user}@{context['time_of_day']}\"\n        return super().learn_one(user, item, reward, context)\n\n    def rank(self, user, items, context):\n        user = f\"{user}@{context['time_of_day']}\"\n        return super().rank(user, items, context)\n\nmodel = FunkMFWithHack(seed=29)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can verify that the model has learnt the correct preferences by looking at the expected preference for each (user, item) pair.

    import pandas as pd\n\n(\n    pd.DataFrame(\n        {\n            'user': user,\n            'item': item,\n            'preference': model.predict_one(user, item)\n        }\n        for user in model.u_latents\n        for item in model.i_latents\n    )\n    .pivot(index='user', columns='item')\n    .style.highlight_max(color='lightgreen', axis='columns')\n)\n
    preference\n          item   camping   finance      food    health     music  politics    sports\nuser\nAnna@afternoon -0.059041 -0.018105  0.069222  0.032865  0.168353  1.000000  0.195960\nAnna@morning   -0.136399 -0.117577  0.076300  0.081131  0.154483  0.221890  1.000000\nTom@afternoon  -0.233071  0.057220 -0.074671 -0.027115  1.000000  0.163607  0.141781\nTom@morning    -0.050107 -0.028562  0.061163 -0.005428  0.063483  1.000000  0.125515\n"},{"location":"examples/debugging-a-pipeline/","title":"Debugging a pipeline","text":"

    River encourages users to make use of pipelines. The biggest pain point of pipelines is that it can be hard to understand what's happening to the data, especially when the pipeline is complex. Fortunately the Pipeline class has a debug_one method that can help out.

    Let's look at a fairly complex pipeline for predicting the number of bikes in 5 bike stations from the city of Toulouse. It doesn't matter if you understand the pipeline or not; the point of this notebook is to learn how to introspect a pipeline.

    import datetime as dt\nfrom river import compose\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\nfrom river import stream\n\n\nX_y = datasets.Bikes()\nX_y = stream.simulate_qa(X_y, moment='moment', delay=dt.timedelta(minutes=30))\n\ndef add_time_features(x):\n    return {\n        **x,\n        'hour': x['moment'].hour,\n        'day': x['moment'].weekday()\n    }\n\nmodel = add_time_features\nmodel |= (\n    compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind') +\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean()) +\n    feature_extraction.TargetAgg(by='station', how=stats.EWMean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nmetric = metrics.MAE()\n\nquestions = {}\n\nfor i, x, y in X_y:\n    # Question\n    is_question = y is None\n    if is_question:\n        y_pred = model.predict_one(x)\n        questions[i] = y_pred\n\n    # Answer\n    else:\n        metric.update(y, questions[i])\n        model = model.learn_one(x, y)\n\n        if i >= 30000 and i % 30000 == 0:\n            print(i, metric)\n
    30000 MAE: 13.328051\n60000 MAE: 7.824087\n90000 MAE: 6.003909\n120000 MAE: 5.052855\n150000 MAE: 4.496826\n180000 MAE: 4.140702\n

    Let's start by looking at the pipeline. You can click each cell to display the current state for each step of the pipeline.

    model\n
    add_time_features: def add_time_features(x): return { **x, 'hour': x['moment'].hour, 'day': x['moment'].weekday() }
    Select ( clouds humidity pressure temperature wind )
    y_mean_by_station_and_hour: TargetAgg ( by=['station', 'hour'] how=Mean () target_name=\"y\" )
    y_ewm_0.5_by_station: TargetAgg ( by=['station'] how=EWMean ( fading_factor=0.5 ) target_name=\"y\" )
    StandardScaler ( with_std=True )
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    As mentioned above the Pipeline class has a debug_one method. You can use this at any point you want to visualize what happens to an input x. For example, let's see what happens to the last seen x.

    print(model.debug_one(x))\n
    0. Input\n--------\nclouds: 88 (int)\ndescription: overcast clouds (str)\nhumidity: 84 (int)\nmoment: 2016-10-05 09:57:18 (datetime)\npressure: 1,017.34000 (float)\nstation: pomme (str)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\n\n1. add_time_features\n--------------------\nclouds: 88 (int)\nday: 2 (int)\ndescription: overcast clouds (str)\nhour: 9 (int)\nhumidity: 84 (int)\nmoment: 2016-10-05 09:57:18 (datetime)\npressure: 1,017.34000 (float)\nstation: pomme (str)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\n\n2. Transformer union\n--------------------\n    2.0 Select\n    ----------\n    clouds: 88 (int)\n    humidity: 84 (int)\n    pressure: 1,017.34000 (float)\n    temperature: 17.45000 (float)\n    wind: 1.95000 (float)\n\n    2.1 TargetAgg\n    -------------\n    y_mean_by_station_and_hour: 7.89396 (float)\n\n    2.2 TargetAgg1\n    --------------\n    y_ewm_0.5_by_station: 11.80372 (float)\n\nclouds: 88 (int)\nhumidity: 84 (int)\npressure: 1,017.34000 (float)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\ny_ewm_0.5_by_station: 11.80372 (float)\ny_mean_by_station_and_hour: 7.89396 (float)\n\n3. StandardScaler\n-----------------\nclouds: 1.54778 (float)\nhumidity: 1.16366 (float)\npressure: 0.04916 (float)\ntemperature: -0.51938 (float)\nwind: -0.69426 (float)\ny_ewm_0.5_by_station: 0.19640 (float)\ny_mean_by_station_and_hour: -0.27110 (float)\n\n4. LinearRegression\n-------------------\nName       Value      Weight     Contribution  \nIntercept   1.00000    9.19960        9.19960  \ny_ewm_0.5_by_station  0.19640    9.19349        1.80562  \nhumidity    1.16366    1.01680        1.18320  \ntemperature -0.51938   -0.41575        0.21593  \n    wind   -0.69426   -0.03810        0.02645  \npressure    0.04916    0.18321        0.00901  \ny_mean_by_station_and_hour -0.27110    0.19553       -0.05301  \n  clouds    1.54778   -0.32838       -0.50827\n\nPrediction: 11.87854\n

    The pipeline does quite a few things, but using debug_one shows what happens step by step. This is really useful for checking that the pipeline is behaving as you're expecting it to. Remember that you can debug_one whenever you wish, be it before, during, or after training a model.

    "},{"location":"examples/imbalanced-learning/","title":"Working with imbalanced data","text":"

    In machine learning it is quite usual to have to deal with imbalanced datasets. This is particularly true in online learning for tasks such as fraud detection and spam classification. In these two cases, which are binary classification problems, there are usually many more 0s than 1s, which generally hinders the performance of the classifiers we throw at them.

    As an example we'll use the credit card dataset available in River. We'll first use a collections.Counter to count the number of 0s and 1s in order to get an idea of the class balance.

    import collections\nfrom river import datasets\n\nX_y = datasets.CreditCard()\n\ncounts = collections.Counter(y for _, y in X_y)\n\nfor c, count in counts.items():\n    print(f'{c}: {count} ({count / sum(counts.values()):.5%})')\n
    0: 284315 (99.82725%)\n1: 492 (0.17275%)\n
    "},{"location":"examples/imbalanced-learning/#baseline","title":"Baseline","text":"

    The dataset is quite unbalanced. For each 1 there are about 578 0s. Let's now train a logistic regression with default parameters and see how well it does. We'll measure the ROC AUC score.

    from river import linear_model\nfrom river import metrics\nfrom river import evaluate\nfrom river import preprocessing\n\n\nX_y = datasets.CreditCard()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 89.11%\n
    "},{"location":"examples/imbalanced-learning/#importance-weighting","title":"Importance weighting","text":"

    The performance is already quite acceptable, but as we will now see we can do even better. The first thing we can do is to add weight to the 1s by using the weight_pos argument of the Log loss function.
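
    For reference, the weighted log loss simply scales the contribution of each class. With \(p\) the predicted probability of the positive class, it reads:

    \[L = -w_{pos} \, y \log(p) - w_{neg} \, (1 - y) \log(1 - p)\]

    Setting weight_pos=5 therefore makes each mistake on a 1 count five times as much as a mistake on a 0.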

    from river import optim\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(\n        loss=optim.losses.Log(weight_pos=5)\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.43%\n
    "},{"location":"examples/imbalanced-learning/#focal-loss","title":"Focal loss","text":"

    The deep learning for object detection community has produced a special loss function for imbalanced learning called focal loss. We are doing binary classification, so we can plug the binary version of focal loss into our logistic regression and see how well it fares.
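
    For reference, the binary focal loss modulates the log loss so that easy, well-classified examples contribute less. Writing \(p_t\) for the probability the model assigns to the true class, it takes the following form, up to a class weight:

    \[FL(p_t) = -(1 - p_t)^{\gamma} \log(p_t)\]

    The larger \(\gamma\), the more the training focuses on hard examples; in the snippet below, the first argument corresponds to \(\gamma\).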

    model = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(loss=optim.losses.BinaryFocalLoss(2, 1))\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.31%\n
    "},{"location":"examples/imbalanced-learning/#under-sampling-the-majority-class","title":"Under-sampling the majority class","text":"

    Adding importance weights only works with gradient-based models (which includes neural networks). A more generic, and potentially more effective, approach is to use under-sampling and over-sampling. As an example, we'll under-sample the stream so that our logistic regression encounters 20% of 1s and 80% of 0s. Under-sampling has the additional benefit of requiring fewer training steps, and thus reduces the total training time.
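
    To give an idea of the mechanism, under-sampling a stream towards a desired class distribution can be done with rejection sampling. Here is a rough sketch where the class priors are hard-coded for illustration; River's RandomUnderSampler estimates them on the fly:

    import random\n\nrng = random.Random(42)\nactual = {0: 0.99827, 1: 0.00173}  # observed class priors, hard-coded here\ndesired = {0: 0.8, 1: 0.2}\n\ndef keep(y):\n    # Keep each class proportionally to desired[y] / actual[y], scaled so that\n    # the most under-represented class is always kept\n    ratios = {c: desired[c] / actual[c] for c in actual}\n    return rng.random() < ratios[y] / max(ratios.values())\n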

    from river import imblearn\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 94.75%\n

    The RandomUnderSampler class is a wrapper for classifiers. This is represented by a rectangle around the logistic regression bubble when we visualize the model.

    model\n
    StandardScaler ( with_std=True )
    RandomUnderSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.8, 1: 0.2} seed=42 )
    "},{"location":"examples/imbalanced-learning/#over-sampling-the-minority-class","title":"Over-sampling the minority class","text":"

    We can also attain the same class distribution by over-sampling the minority class. This will come at the cost of having to train with more samples.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomOverSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.71%\n
    "},{"location":"examples/imbalanced-learning/#sampling-with-a-desired-sample-size","title":"Sampling with a desired sample size","text":"

    The downside of both RandomUnderSampler and RandomOverSampler is that you don't have any control over the amount of data the classifier trains on. The number of samples is adjusted so that the target distribution can be attained, either by under-sampling or by over-sampling. However, you can do both at the same time and choose how much data the classifier will see. To do so, we can use the RandomSampler class. In addition to the desired class distribution, we can specify how much data to train on. The samples will both be under-sampled and over-sampled in order to fit your constraints. This is powerful because it allows you to control both the class distribution and the size of the training data (and thus the training time). In the following example we'll set it so that the model will train with 1 percent of the data.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        sampling_rate=.01,\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 94.71%\n
    "},{"location":"examples/imbalanced-learning/#hybrid-approach","title":"Hybrid approach","text":"

    As you might have guessed by now, nothing is stopping you from mixing imbalanced learning methods together. As an example, let's combine imblearn.RandomUnderSampler and the weight_pos parameter of the optim.losses.Log loss function.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(\n            loss=optim.losses.Log(weight_pos=5)\n        ),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 96.52%\n
    "},{"location":"examples/quantile-regression-uncertainty/","title":"Handling uncertainty with quantile regression","text":"
    %matplotlib inline\n

    Quantile regression is useful when you're not so much interested in the accuracy of your model, but rather you want your model to be good at ranking observations correctly. The typical way to perform quantile regression is to use a special loss function, namely the quantile loss. The quantile loss takes a parameter, \\(\\alpha\\) (alpha), which indicates which quantile the model should be targeting. In the case of \\(\\alpha = 0.5\\), then this is equivalent to asking the model to predict the median value of the target, and not the most likely value which would be the mean.
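
    The quantile loss, also known as the pinball loss, is short enough to write down. The following is the textbook definition, which is essentially what optim.losses.Quantile implements:

    def quantile_loss(y_true, y_pred, alpha):\n    diff = y_true - y_pred\n    # Under-predictions are weighted by alpha, over-predictions by (1 - alpha)\n    return alpha * diff if diff > 0 else (alpha - 1) * diff\n

    With \(\alpha = 0.5\) this reduces to half the absolute error, which is why minimizing it yields the median.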

    A nice thing we can do with quantile regression is to produce a prediction interval for each prediction. Indeed, if we predict the lower and upper quantiles of the target then we will be able to obtain a \"trust region\" in between which the true value is likely to belong. Of course, the likeliness will depend on the chosen quantiles. For a slightly more detailed explanation see this blog post.

    As an example, let us take the simple nowcasting model we built in another notebook. Instead of predicting the mean value of the target distribution, we will predict the 5th, 50th, and 95th quantiles. This will require training three separate models, so we will encapsulate the model building logic in a function called make_model. We also have to slightly adapt the training loop, but not by much. Finally, we will draw the prediction interval along with the predictions for the 50th quantile (i.e. the median) and the true values.

    import calendar\nimport math\nimport matplotlib.pyplot as plt\nfrom river import compose\nfrom river import datasets\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\nfrom river import stats\n\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}    \n\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\n\ndef make_model(alpha):\n\n    extract_features = compose.TransformerUnion(get_ordinal_date, get_month_distances)\n\n    scale = preprocessing.StandardScaler()\n\n    learn = linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03),\n        loss=optim.losses.Quantile(alpha=alpha)\n    )\n\n    model = extract_features | scale | learn\n    model = preprocessing.TargetStandardScaler(regressor=model)\n\n    return model\n\nmetric = metrics.MAE()\n\nmodels = {\n    'lower': make_model(alpha=0.05),\n    'center': make_model(alpha=0.5),\n    'upper': make_model(alpha=0.95)\n}\n\ndates = []\ny_trues = []\ny_preds = {\n    'lower': [],\n    'center': [],\n    'upper': []\n}\n\nfor x, y in datasets.AirlinePassengers():\n    y_trues.append(y)\n    dates.append(x['month'])\n\n    for name, model in models.items():\n        y_preds[name].append(model.predict_one(x))\n        model.learn_one(x, y)\n\n    # Update the error metric\n    metric.update(y, y_preds['center'][-1])\n\n# Plot the results\nfig, ax = plt.subplots(figsize=(10, 6))\nax.grid(alpha=0.75)\nax.plot(dates, y_trues, lw=3, color='#2ecc71', alpha=0.8, label='Truth')\nax.plot(dates, y_preds['center'], lw=3, color='#e74c3c', alpha=0.8, label='Prediction')\nax.fill_between(dates, y_preds['lower'], y_preds['upper'], color='#e74c3c', alpha=0.3, label='Prediction interval')\nax.legend()\nax.set_title(metric);\n

    An important thing to note is that the prediction interval we obtained should not be confused with a confidence interval. Simply put, a prediction interval represents uncertainty for where the true value lies, whereas a confidence interval encapsulates the uncertainty on the prediction. You can find out more by reading this CrossValidated post.

    "},{"location":"examples/sentence-classification/","title":"Sentence classification","text":"

    In this tutorial we will try to predict whether an SMS is spam or not. To train our model, we will use the SMSSpam dataset. This dataset is unbalanced: only 13.4% of the messages are spam. Let's look at the data:

    from river import datasets\n\ndatasets.SMSSpam()\n
    SMS Spam Collection dataset.\n\nThe data contains 5,574 items and 1 feature (i.e. SMS body). Spam messages represent\n13.4% of the dataset. The goal is to predict whether an SMS is a spam or not.\n\n      Name  SMSSpam                                                                              \n      Task  Binary classification                                                                \n   Samples  5,574                                                                                \n  Features  1                                                                                    \n    Sparse  False                                                                                \n      Path  /Users/max/river_data/SMSSpam/SMSSpamCollection                                      \n       URL  https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip\n      Size  466.71 KB                                                                            \nDownloaded  True\n
    from pprint import pprint\n\nX_y = datasets.SMSSpam()\n\nfor x, y in X_y:\n    pprint(x)\n    print(f'Spam: {y}')\n    break\n
    {'body': 'Go until jurong point, crazy.. Available only in bugis n great world '\n         'la e buffet... Cine there got amore wat...\\n'}\nSpam: False\n

    Let's start by building a simple model like a Naive Bayes classifier. We will first preprocess the sentences with a TF-IDF transform that our model can consume. Then, we will measure the performance of our model with the ROC AUC metric. This is the right metric to use when the classes are not balanced. In addition, Naive Bayes models can perform very well on unbalanced datasets and can be used for both binary and multi-class classification problems.
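
    As a quick reminder, TF-IDF weighs each term by its frequency in the message at hand and by its rarity across the messages seen so far, along the lines of:

    \[tfidf(t, d) = tf(t, d) \times \log\frac{N}{1 + df(t)}\]

    where \(N\) is the number of documents processed so far and \(df(t)\) is the number of them containing the term \(t\). The exact smoothing and normalization details vary between implementations; the point is that these counts can be maintained incrementally, which is what makes the transform usable on a stream.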

    from river import feature_extraction\nfrom river import naive_bayes\nfrom river import metrics\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    naive_bayes.BernoulliNB(alpha=0)\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    if y_pred is not None:\n        metric.update(y_pred=y_pred, y_true=y)\n        cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 93.00%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,809     17  \nTrue   102    645\n

    The results are quite good with this first model.

    Since we are working with an imbalanced dataset, we can use the imblearn module to rebalance the classes of our dataset. For more information about the imblearn module, you can find a dedicated tutorial here.

    from river import imblearn\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    imblearn.RandomUnderSampler(\n        classifier=naive_bayes.BernoulliNB(alpha=0),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    if y_pred is not None:\n        metric.update(y_pred=y_pred, y_true=y)\n        cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 94.61%\n

    The imblearn module improved our results. Not bad! We can visualize the pipeline to understand how the data is processed.

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,570    255  \nTrue    41    706\n
    model\n
    TFIDF ( normalize=True on=\"body\" strip_accents=True lowercase=True preprocessor=None tokenizer=None ngram_range=(1, 1) )
    RandomUnderSampler ( classifier=BernoulliNB ( alpha=0 true_threshold=0. ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    Now let's try to use logistic regression to classify messages. We will use a few tips to make our model perform better. As in the previous example, we rebalance the classes of our dataset. The logistic regression will be fed with TF-IDF features.

    from river import linear_model\nfrom river import optim\nfrom river import preprocessing\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    preprocessing.Normalizer() | \n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(\n            optimizer=optim.SGD(.9), \n            loss=optim.losses.Log()\n        ),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    metric.update(y_pred=y_pred, y_true=y)\n    cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 93.80%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,584    243  \nTrue    55    692\n
    model\n
    TFIDF ( normalize=True on=\"body\" strip_accents=True lowercase=True preprocessor=None tokenizer=None ngram_range=(1, 1) )
    Normalizer ( order=2 )
    RandomUnderSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.9 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    The results of the logistic regression are quite good but still inferior to the naive Bayes model.

    Let's try to use word embeddings to improve our logistic regression. Word embeddings allow you to represent a word as a vector. Embeddings are developed to build semantically rich vectors. For instance, the vector which represents the word python should be close to the vector which represents the word programming. We will use spaCy to convert our sentence to vectors. spaCy converts a sentence to a vector by calculating the average of the embeddings of the words in the sentence.

    You can download pre-trained embeddings in many languages. We will use English pre-trained embeddings as our SMS are in English.

    The command below allows you to download the pre-trained embeddings that spaCy makes available. More information about spaCy and its installation may be found here.

    python -m spacy download en_core_web_sm\n
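
    Once the model is downloaded, the averaging claim made above can be sanity-checked directly, since a spaCy Doc's vector is by default the average of its token vectors:

    import numpy as np\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp('Go until jurong point')\nassert np.allclose(doc.vector, np.mean([token.vector for token in doc], axis=0))\n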

    Here, we create a custom transformer to convert an input sentence to a dict of floats. We will integrate this transformer into our pipeline.

    import spacy\n\nfrom river.base import Transformer\n\nclass Embeddings(Transformer):\n    \"\"\"My custom transformer, word embedding using spaCy.\"\"\"\n\n    def __init__(self, on: str):\n        self.on = on\n        self.embeddings = spacy.load('en_core_web_sm')\n\n    def transform_one(self, x, y=None):\n        return {dimension: xi for dimension, xi in enumerate(self.embeddings(x[self.on]).vector)}\n

    Let's train our logistic regression:

    X_y = datasets.SMSSpam()\n\nmodel = (\n    Embeddings(on='body') | \n    preprocessing.Normalizer() |\n    imblearn.RandomOverSampler(\n        classifier=linear_model.LogisticRegression(\n            optimizer=optim.SGD(.5), \n            loss=optim.losses.Log()\n        ),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    metric.update(y_pred=y_pred, y_true=y)\n    cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 91.31%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,537    290  \nTrue    85    662\n
    model\n
    Embeddings ( on=\"body\" )
    Normalizer ( order=2 )
    RandomOverSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.5 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    The results of the logistic regression using spaCy embeddings are lower than those obtained with TF-IDF values. We could surely improve the results by cleaning up the text. We could also use embeddings more suited to our dataset. However, on this problem, the logistic regression is not better than the Naive Bayes model. No free lunch today.

    "},{"location":"examples/the-art-of-using-pipelines/","title":"The art of using pipelines","text":"

    Pipelines are a natural way to think about a machine learning system. Indeed with some practice a data scientist can visualise data \"flowing\" through a series of steps. The input is typically some raw data which has to be processed in some manner. The goal is to represent the data in such a way that it can be ingested by a machine learning algorithm. Along the way some steps will extract features, while others will normalize the data and remove undesirable elements. Pipelines are simple, and yet they are a powerful way of designing sophisticated machine learning systems.

    Both scikit-learn and pandas make it possible to use pipelines. However it's quite rare to see pipelines being used in practice (at least on Kaggle). Sometimes you get to see people using scikit-learn's pipeline module, however the pipe method from pandas is sadly underappreciated. A big reason why pipelines are not given much love is that it's easier to think of batch learning in terms of a script or a notebook. Indeed many people doing data science seem to prefer a procedural style to a declarative style. Moreover in practice pipelines can be a bit rigid if one wishes to do non-orthodox operations.

    Although pipelines may be a bit of an odd fit for batch learning, they make complete sense when they are used for online learning. Indeed the UNIX philosophy has advocated the use of pipelines for data processing for many decades. If you can visualise data as a stream of observations then using pipelines should make a lot of sense to you. We'll attempt to convince you by writing a machine learning algorithm in a procedural way and then converting it to a declarative pipeline in small steps. Hopefully by the end you'll be convinced, or not!

    In this notebook we'll manipulate data from the Kaggle Recruit Restaurants Visitor Forecasting competition. The data is directly available through River's datasets module.

    from pprint import pprint\nfrom river import datasets\n\nfor x, y in datasets.Restaurants():\n    pprint(x)\n    pprint(y)\n    break\n
    Downloading https://maxhalford.github.io/files/datasets/kaggle_recruit_restaurants.zip (4.28 MB)\nUncompressing into /Users/max/river_data/Restaurants\n{'area_name': 'T\u014dky\u014d-to Nerima-ku Toyotamakita',\n 'date': datetime.datetime(2016, 1, 1, 0, 0),\n 'genre_name': 'Izakaya',\n 'is_holiday': True,\n 'latitude': 35.7356234,\n 'longitude': 139.6516577,\n 'store_id': 'air_04341b588bde96cd'}\n10\n

    We'll start by building and running a model using a procedural coding style. The performance of the model doesn't matter, we're simply interested in the design of the model.

    from river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\nfrom river import utils\n\nmeans = (\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)),\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)),\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n)\n\nscaler = preprocessing.StandardScaler()\nlin_reg = linear_model.LinearRegression()\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n\n    # Derive date features\n    x['weekday'] = x['date'].weekday()\n    x['is_weekend'] = x['date'].weekday() in (5, 6)\n\n    # Process the rolling means of the target  \n    for mean in means:\n        x = {**x, **mean.transform_one(x)}\n        mean.learn_one(x, y)\n\n    # Remove the key/value pairs that aren't features\n    for key in ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']:\n        x.pop(key)\n\n    # Rescale the data\n    x = scaler.learn_one(x).transform_one(x)\n\n    # Fit the linear regression\n    y_pred = lin_reg.predict_one(x)\n    lin_reg.learn_one(x, y)\n\n    # Update the metric using the out-of-fold prediction\n    metric.update(y, y_pred)\n\nprint(metric)\n
    MAE: 8.316538\n

    We're not using many features. We can print the last x to get an idea of the features (don't forget they've been scaled!)

    pprint(x)\n
    {'is_holiday': -0.23103573677646685,\n 'is_weekend': 1.6249280076334165,\n 'weekday': 1.0292832579142892,\n 'y_mean_by_store_id': -1.3980979075298516}\n

    The above chunk of code is quite explicit but it's a bit verbose. The whole point of libraries such as River is to make life easier for users. Moreover there's too much room for users to mess up the order in which things are done, which increases the chance of target leakage. We'll now rewrite our model in a declarative fashion using a pipeline à la sklearn.

    from river import compose\n\n\ndef get_date_features(x):\n    weekday =  x['date'].weekday()\n    return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('date_features', compose.FuncTransformer(get_date_features)),\n        ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7))),\n        ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14))),\n        ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)))\n    )),\n    ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n\n    # Make a prediction without using the target\n    y_pred = model.predict_one(x)\n\n    # Update the model using the target\n    model.learn_one(x, y)\n\n    # Update the metric using the out-of-fold prediction\n    metric.update(y, y_pred)\n\nprint(metric)\n
    MAE: 8.413859\n

    We use a Pipeline to arrange each step in sequential order. A TransformerUnion is used to merge multiple feature extractors into a single transformer. The for loop is now much shorter and is thus easier to grok: we get the out-of-fold prediction, we fit the model, and finally we update the metric. This way of evaluating a model is typical of online learning, which is why it is wrapped inside a function called progressive_val_score, part of the evaluate module. We can use it to replace the for loop.

    from river import evaluate\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('date_features', compose.FuncTransformer(get_date_features)),\n        ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7))),\n        ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14))),\n        ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)))\n    )),\n    ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n\nevaluate.progressive_val_score(dataset=datasets.Restaurants(), model=model, metric=metrics.MAE())\n
    MAE: 8.413859\n

    Notice that you couldn't have used the progressive_val_score function if you had written the model in a procedural manner.

    Our code is getting shorter, but it's still a bit difficult on the eyes. Indeed there is a lot of boilerplate code associated with pipelines that can get tedious to write. However River has some special tricks up its sleeve to save you from a lot of pain.

    The first trick is that the name of each step in the pipeline can be omitted. If no name is given for a step then River automatically infers one.

    model = compose.Pipeline(\n    compose.TransformerUnion(\n        compose.FuncTransformer(get_date_features),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n    ),\n    compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Under the hood a Pipeline inherits from collections.OrderedDict. Indeed this makes sense because if you think about it a Pipeline is simply a sequence of steps where each step has a name. The reason we mention this is because it means you can manipulate a Pipeline the same way you would manipulate an ordinary dict. For instance we can print the name of each step by iterating over the steps.

    for name in model.steps:\n    print(name)\n
    TransformerUnion\nDiscard\nStandardScaler\nLinearRegression\n

    The first step is a TransformerUnion, and its string representation contains the string representation of each of its elements. Not having to write names saves time and is certainly less tedious.
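
    Since the steps behave like a dict, we can also fetch an individual step by its name. Here is a minimal sketch, assuming dict-style access on the steps of the pipeline defined above:

    model.steps['LinearRegression']  # access a single step by its inferred name\n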

    The next trick is that we can use mathematical operators to compose our pipeline. For example we can use the + operator to merge Transformers into a TransformerUnion.

    model = compose.Pipeline(\n    compose.FuncTransformer(get_date_features) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)),\n\n    compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Likewise, we can use the | operator to assemble steps into a Pipeline.

    model = (\n    compose.FuncTransformer(get_date_features) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n)\n\nto_discard = ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']\n\nmodel = model | compose.Discard(*to_discard) | preprocessing.StandardScaler()\n\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Hopefully you'll agree that this is a powerful way to express machine learning pipelines. For some people this should be quite reminiscent of the UNIX pipe operator. One final trick we want to mention is that functions are automatically wrapped with a FuncTransformer, which can be quite handy.

    model = get_date_features\n\nfor n in [7, 14, 21]:\n    model += feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), n))\n\nmodel |= compose.Discard(*to_discard)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Naturally some may prefer the procedural style we first used because they find it easier to work with. It all depends on your style and you should use what you feel comfortable with. However we encourage you to use operators because we believe that this will increase the readability of your code, which is very important. To each their own!

    Before finishing we can take an interactive look at our pipeline.

    model\n
    get_date_features
    def get_date_features(x): weekday = x['date'].weekday() return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}
    y_mean_by_store_id
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=7 ) target_name=\"y\" )
    y_mean_by_store_id
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=14 ) target_name=\"y\" )
    y_mean_by_store_id
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=21 ) target_name=\"y\" )
    ~['area_name', [...]
    Discard ( area_name date genre_name latitude longitude store_id )
    StandardScaler
    StandardScaler ( with_std=True )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/","title":"Part 1","text":"

    Table of contents of this tutorial series on matrix factorization for recommender systems:

    • Part 1 - Traditional Matrix Factorization methods for Recommender Systems
    • Part 2 - Factorization Machines and Field-aware Factorization Machines
    • Part 3 - Large scale learning and better predictive power with multiple pass learning
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#introduction","title":"Introduction","text":"

    A recommender system is a software tool designed to generate and suggest items or entities to users. Popular large-scale examples include:

    • Amazon (suggesting products)
    • Facebook (suggesting posts in users' news feeds)
    • Spotify (suggesting music)

    Social recommendation from graphs (mostly used by social networks) is not covered in River. We focus on the general case: item recommendation. This problem can be represented with the user-item matrix:

    \\[ \\normalsize \\begin{matrix} & \\begin{matrix} _1 & _\\cdots & _\\cdots & _\\cdots & _I \\end{matrix} \\\\ \\begin{matrix} _1 \\\\ _\\vdots \\\\ _\\vdots \\\\ _\\vdots \\\\ _U \\end{matrix} & \\begin{bmatrix} {\\color{Red} ?} & 2 & \\cdots & {\\color{Red} ?} & {\\color{Red} ?} \\\\ {\\color{Red} ?} & {\\color{Red} ?} & \\cdots & {\\color{Red} ?} & 4.5 \\\\ \\vdots & \\ddots & \\ddots & \\ddots & \\vdots \\\\ 3 & {\\color{Red} ?} & \\cdots & {\\color{Red} ?} & {\\color{Red} ?} \\\\ {\\color{Red} ?} & {\\color{Red} ?} & \\cdots & 5 & {\\color{Red} ?} \\end{bmatrix} \\end{matrix} \\]

    Where \\(U\\) and \\(I\\) are the numbers of users and items in the system, respectively. A matrix entry represents a user's preference for an item; it can be a rating, a like or dislike, etc. Because of the huge number of users and items compared to the number of observed entries, those matrices are very sparse (usually less than 1% filled).

    Matrix Factorization (MF) is a class of collaborative filtering algorithms derived from Singular Value Decomposition (SVD). MF's strength lies in its capacity to model interactions between high-cardinality categorical variables. This subfield boomed during the famous Netflix Prize contest in 2006, when numerous novel variants were invented and became popular thanks to their attractive accuracy and scalability.

    The MF approach treats the problem as one of matrix completion, seeking to fill in the user-item matrix. Its core idea is to assume a latent model that learns its own representation of the users and the items in a lower-dimensional latent space by factorizing the observed parts of the matrix.

    A factorized user or item is represented as a vector \\(\\mathbf{v}_u\\) or \\(\\mathbf{v}_i\\) composed of \\(k\\) latent factors, with \\(k \\ll U, I\\). Those learnt latent variables represent, for an item, the various aspects describing it, and, for a user, their interests in terms of those aspects. The model then assumes that a user's fondness for an item is the sum of their preferences about its various aspects. This sum is the dot product between the latent vectors of a given user-item pair:

    \\[ \\normalsize \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f} \\]
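
    As a quick illustration, here is this dot product computed by hand for two made-up latent vectors with \\(k = 3\\) factors:

    v_u = [0.2, -0.1, 0.5]  # made-up user latent vector\nv_i = [0.4, 0.3, -0.2]  # made-up item latent vector\n\n# <v_u, v_i> is the sum of the element-wise products\nsum(vu_f * vi_f for vu_f, vi_f in zip(v_u, v_i))  # 0.08 - 0.03 - 0.10 = -0.05\n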

    MF model weights are learnt in an online fashion, often with stochastic gradient descent, as it provides relatively fast running times and good accuracy. There is a great and widely popular library named surprise that implements MF models (and others), but in contrast with River it doesn't follow a pure online philosophy (all the data has to be loaded in memory and the API doesn't allow you to update your model with new data).

    Notes:

    • In recent years, deep learning techniques proposed for recommendation tasks have claimed state-of-the-art results. However, recent work (August 2019) showed that those promises can't be taken for granted and that traditional MF methods are still relevant today.
    • For more information about how the business value of recommender systems is measured and why they are one of the main success stories of machine learning, see the following literature survey (December 2019).
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#lets-start","title":"Let's start","text":"

    In this tutorial, we are going to explore the MF algorithms available in River and test them on a movie recommendation problem with the MovieLens 100K dataset. The latter is a collection of movie ratings (from 1 to 5) that includes various information about both the items and the users. We can access it from the river.datasets module:

    import json\n\nfrom river import datasets\n\nfor x, y in datasets.MovieLens100K():\n    print(f'x = {json.dumps(x, indent=4)}')\n    print(f'y = {y}')\n    break\n
    Downloading https://maxhalford.github.io/files/datasets/ml_100k.zip (1.83 MB)\nUncompressing into /Users/max/river_data/MovieLens100K\nx = {\n    \"user\": \"259\",\n    \"item\": \"255\",\n    \"timestamp\": 874731910000000000,\n    \"title\": \"My Best Friend's Wedding (1997)\",\n    \"release_date\": 866764800000000000,\n    \"genres\": \"comedy, romance\",\n    \"age\": 21.0,\n    \"gender\": \"M\",\n    \"occupation\": \"student\",\n    \"zip_code\": \"48823\"\n}\ny = 4.0\n

    Let's define a routine to evaluate our different models on MovieLens 100K. Mean Absolute Error and Root Mean Squared Error will be our metrics, printed alongside the model's computation time and memory usage:

    from river import metrics\nfrom river.evaluate import progressive_val_score\n\ndef evaluate(model, unpack_user_and_item=True):\n    X_y = datasets.MovieLens100K(unpack_user_and_item)\n    metric = metrics.MAE() + metrics.RMSE()\n    _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#naive-prediction","title":"Naive prediction","text":"

    It's good practice in machine learning to start with a naive baseline and then iterate from simple things to complex ones, observing progress incrementally. Let's start by predicting the target's running mean as a first shot:

    from river import dummy\nfrom river import stats\n\nmodel = dummy.StatisticRegressor(stats.Mean())\nevaluate(model, unpack_user_and_item=False)\n
    [25,000] MAE: 0.934259, RMSE: 1.124469 \u2013 00:00:00 \u2013 514 B\n[50,000] MAE: 0.923893, RMSE: 1.105 \u2013 00:00:00 \u2013 514 B\n[75,000] MAE: 0.937359, RMSE: 1.123696 \u2013 00:00:01 \u2013 514 B\n[100,000] MAE: 0.942162, RMSE: 1.125783 \u2013 00:00:01 \u2013 514 B\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#baseline-model","title":"Baseline model","text":"

    Now we can do machine learning and explore the models available in the river.reco module, starting with the baseline model. It extends our naive prediction by adding to the global running mean two bias terms characterizing the user's and the item's deviation from the general tendency. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i} \\]

    This baseline model can be viewed as a linear regression on one-hot encoded users and items, where the intercept is replaced by the target's running mean.
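
    To make the equation concrete, here is a toy computation with made-up values, where the global mean is 3.5, user \\(u\\) rates 0.3 above average, and item \\(i\\) is rated 0.2 below average:

    y_bar = 3.5  # global running mean of the ratings\nbu = 0.3     # made-up bias of user u\nbi = -0.2    # made-up bias of item i\n\ny_bar + bu + bi  # predicted rating: 3.6\n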

    All machine learning models in River expect dicts as input, with feature names as keys and feature values as values. Specifically, models from river.reco expect 'user' and 'item' entries without any type constraint on their values (i.e. they can be strings or numbers), e.g.:

    x = {\n    'user': 'Guido',\n    'item': \"Monty Python's Flying Circus\"\n}\n

    Other entries, if they exist, are simply ignored. This is quite useful as we don't need to spend time and storage doing one hot encoding.

    from river import preprocessing\nfrom river import optim\nfrom river import reco\n\nbaseline_params = {\n    'optimizer': optim.SGD(0.025),\n    'l2': 0.,\n    'initializer': optim.initializers.Zeros()\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.Baseline(**baseline_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761844, RMSE: 0.960972 \u2013 00:00:00 \u2013 173.6 KB\n[50,000] MAE: 0.753292, RMSE: 0.951223 \u2013 00:00:00 \u2013 242.23 KB\n[75,000] MAE: 0.754177, RMSE: 0.953376 \u2013 00:00:01 \u2013 286.04 KB\n[100,000] MAE: 0.754651, RMSE: 0.954148 \u2013 00:00:01 \u2013 309.64 KB\n

    We gained almost two tenths of MAE compared to our naive prediction (0.7546 vs. 0.9421), meaning that the model has learnt significant information.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#funk-matrix-factorization-funkmf","title":"Funk Matrix Factorization (FunkMF)","text":"

    It's the pure form of matrix factorization, consisting only of learning the users' and items' latent representations, as discussed in the introduction. Simon Funk popularized its stochastic gradient descent optimization in 2006 during the Netflix Prize. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle \\]

    Note: FunkMF is sometimes referred to as Probabilistic Matrix Factorization, which is in fact an extended, probabilistic version of it.

    funk_mf_params = {\n    'n_factors': 10,\n    'optimizer': optim.SGD(0.05),\n    'l2': 0.1,\n    'initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73)\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.FunkMF(**funk_mf_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 1.070136, RMSE: 1.397014 \u2013 00:00:00 \u2013 570.35 KB\n[50,000] MAE: 0.99174, RMSE: 1.290666 \u2013 00:00:01 \u2013 716 KB\n[75,000] MAE: 0.961072, RMSE: 1.250842 \u2013 00:00:01 \u2013 844.09 KB\n[100,000] MAE: 0.944883, RMSE: 1.227688 \u2013 00:00:02 \u2013 945.19 KB\n

    Results are equivalent to our naive prediction (0.9448 vs. 0.9421). By only focusing on the users' preferences and the items' characteristics, the model is limited in its ability to capture different views of the problem. Despite its poor performance alone, this algorithm is quite useful when combined with other models, or when we need to build dense representations for other tasks.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#biased-matrix-factorization-biasedmf","title":"Biased Matrix Factorization (BiasedMF)","text":"

    It's the combination of the Baseline model and FunkMF. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i} + \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle \\]

    Note: the name Biased Matrix Factorization is used by some people, while others refer to it as SVD or Funk SVD. This is the case of Yehuda Koren and Robert Bell in Recommender Systems Handbook (Chapter 5, Advances in Collaborative Filtering) and of the surprise library. Nevertheless, SVD could be confused with the original Singular Value Decomposition from which it's derived, and Funk SVD could also be misleading because the biased part of the model equation doesn't come from Simon Funk's work. For those reasons, we chose to go with Biased Matrix Factorization, which fits the model more naturally.

    biased_mf_params = {\n    'n_factors': 10,\n    'bias_optimizer': optim.SGD(0.025),\n    'latent_optimizer': optim.SGD(0.05),\n    'weight_initializer': optim.initializers.Zeros(),\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n    'l2_bias': 0.,\n    'l2_latent': 0.\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.BiasedMF(**biased_mf_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761818, RMSE: 0.961057 \u2013 00:00:00 \u2013 669.27 KB\n[50,000] MAE: 0.751667, RMSE: 0.949443 \u2013 00:00:01 \u2013 869.85 KB\n[75,000] MAE: 0.749653, RMSE: 0.948723 \u2013 00:00:02 \u2013 1 MB\n[100,000] MAE: 0.748559, RMSE: 0.947854 \u2013 00:00:02 \u2013 1.11 MB\n

    Results improved (0.7485 vs. 0.7546), demonstrating that the users' and items' latent representations bring additional information.

    To conclude this first tutorial about factorization models, let's review the important parameters to tune when dealing with this family of methods:

    • n_factors: the number of latent factors. The more you set, the more item aspects and user preferences you will learn. Too many will cause overfitting; l2 regularization can help.
    • *_optimizer: the optimizers. Classic stochastic gradient descent performs well; finding a good learning rate is what makes the difference.
    • initializer: the latent weights initialization. Latent vectors have to be initialized with non-constant values. We generally sample them from a zero-mean normal distribution with small standard deviation.
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/","title":"Part 2","text":"

    As seen in Part 1, the strength of Matrix Factorization (MF) lies in its ability to deal with sparse and high-cardinality categorical variables. In this second tutorial we will have a look at the Factorization Machines (FM) algorithm and study how it generalizes the power of MF.

    Table of contents of this tutorial series on matrix factorization for recommender systems:

    • Part 1 - Traditional Matrix Factorization methods for Recommender Systems
    • Part 2 - Factorization Machines and Field-aware Factorization Machines
    • Part 3 - Large scale learning and better predictive power with multiple pass learning
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#factorization-machines","title":"Factorization Machines","text":"

    Steffen Rendle came up with Factorization Machines in 2010: an algorithm able to handle any real-valued feature vector, combining the advantages of general predictors with those of factorization models. It became quite popular in the field of online advertising, notably after winning several Kaggle competitions. The modeling technique starts with a linear regression to capture the effect of each variable individually:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} \\]

    Interaction terms are then added to learn relations between features. Instead of learning a single specific weight per interaction (as in polynomial regression), a set of latent factors is learnt per feature (as in MF). An interaction is computed by multiplying the product of the involved features with the dot product of their latent vectors. The degree of factorization (also called the model order) represents the maximum number of features per interaction considered. The model equation for a factorization machine of degree \\(d\\) = 2 is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'} \\]

    Where \\(\\normalsize \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle\\) is the dot product of \\(j\\) and \\(j'\\) latent vectors:

    \\[ \\normalsize \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{j, f} \\cdot \\mathbf{v}_{j', f} \\]
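
    To illustrate, here is a pure-Python sketch of the degree 2 interaction term, using made-up features and latent vectors:

    # Made-up sample with p = 3 features and k = 2 latent factors per feature\nx = [1.0, 0.5, 2.0]\nv = [[0.1, 0.2], [0.3, -0.1], [0.05, 0.4]]\n\ndef dot(a, b):\n    return sum(a_f * b_f for a_f, b_f in zip(a, b))\n\n# Sum of <v_j, v_j'> * x_j * x_j' over all pairs j < j'\nsum(\n    dot(v[j], v[jp]) * x[j] * x[jp]\n    for j in range(len(x))\n    for jp in range(j + 1, len(x))\n)\n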

    Higher-order FMs will be covered in a later section. For now, just note that factorization models express their power in sparse settings, which is also where higher-order interactions are hard to estimate.

    Strong emphasis must be placed on feature engineering, as it allows FM to mimic most factorization models and significantly impacts its performance. One hot encoding of high-cardinality categorical variables is the most frequent step before feeding the model with data. For more efficiency, River's FM implementation considers string values to be categorical variables and automatically one hot encodes them. FM models have their own module, river.facto.

    ## Mimic Biased Matrix Factorization (BiasedMF)

    Let's start with a simple example where we want to reproduce the Biased Matrix Factorization model we trained in the previous tutorial. For a fair comparison with Part 1 example, let's set the same evaluation framework:

    from river import datasets\nfrom river import metrics\nfrom river.evaluate import progressive_val_score\n\ndef evaluate(model):\n    X_y = datasets.MovieLens100K()\n    metric = metrics.MAE() + metrics.RMSE()\n    _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)\n

    In order to build an equivalent model we need to use the same hyper-parameters. As we can't replace the FM intercept with the global running mean, we won't be able to build the exact same model:

    from river import compose\nfrom river import facto\nfrom river import preprocessing\nfrom river import optim\nfrom river import stats\n\nfm_params = {\n    'n_factors': 10,\n    'weight_optimizer': optim.SGD(0.025),\n    'latent_optimizer': optim.SGD(0.05),\n    'sample_normalization': False,\n    'l1_weight': 0.,\n    'l2_weight': 0.,\n    'l1_latent': 0.,\n    'l2_latent': 0.,\n    'intercept': 3,\n    'intercept_lr': .01,\n    'weight_initializer': optim.initializers.Zeros(),\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761761, RMSE: 0.960662 \u2013 00:00:01 \u2013 818.86 KB\n[50,000] MAE: 0.751922, RMSE: 0.949783 \u2013 00:00:03 \u2013 948.77 KB\n[75,000] MAE: 0.749822, RMSE: 0.948634 \u2013 00:00:04 \u2013 1.07 MB\n[100,000] MAE: 0.748393, RMSE: 0.94776 \u2013 00:00:06 \u2013 1.19 MB\n

    Both MAEs are very close to each other (0.7486 vs. 0.7485), showing that we almost reproduced the [reco.BiasedMF](../../../api/reco/BiasedMF) algorithm. The cost is a naturally slower running time, as the FM implementation offers more flexibility.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#feature-engineering-for-fm-models","title":"Feature engineering for FM models","text":"

    Let's study the basics of how to properly encode data for FM models. We are going to keep using MovieLens 100K as it provides various feature types:

    import json\n\nfor x, y in datasets.MovieLens100K():\n    print(f'x = {json.dumps(x, indent=4)}\\ny = {y}')\n    break\n
    x = {\n    \"user\": \"259\",\n    \"item\": \"255\",\n    \"timestamp\": 874731910000000000,\n    \"title\": \"My Best Friend's Wedding (1997)\",\n    \"release_date\": 866764800000000000,\n    \"genres\": \"comedy, romance\",\n    \"age\": 21.0,\n    \"gender\": \"M\",\n    \"occupation\": \"student\",\n    \"zip_code\": \"48823\"\n}\ny = 4.0\n

    The features we are going to add to our model don't improve its predictive power. Nevertheless, they are useful to illustrate different methods of data encoding:

    1. Set-categorical variables

    We have seen that categorical variables are one hot encoded automatically if set to strings. On the other hand, set-categorical variables must be encoded explicitly by the user. A good way of doing so is to assign them a value of \\(1/m\\), where \\(m\\) is the number of elements in the sample's set. This gives the feature a constant \"weight\" across all samples, preserving the model's stability. Let's create a routine to encode movie genres this way:

    def split_genres(x):\n    genres = x['genres'].split(', ')\n    return {f'genre_{genre}': 1 / len(genres) for genre in genres}\n
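
    For example, a sample with two genres gives each of them a weight of one half:

    split_genres({'genres': 'comedy, romance'})  # {'genre_comedy': 0.5, 'genre_romance': 0.5}\n
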
    2. Numerical variables

    In practice, transforming numerical features into categorical ones works better in most cases. Feature binning is the natural way to do so, but finding good bins is sometimes more of an art than a science. Let's encode the users' age with something simple:

    def bin_age(x):\n    if x['age'] <= 18:\n        return {'age_0-18': 1}\n    elif x['age'] <= 32:\n        return {'age_19-32': 1}\n    elif x['age'] < 55:\n        return {'age_33-54': 1}\n    else:\n        return {'age_55-100': 1}\n
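
    For instance, the 21 year old user from the sample shown above falls into the second bin:

    bin_age({'age': 21.0})  # {'age_19-32': 1}\n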

    Let's put everything together:

    fm_params = {\n    'n_factors': 14,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.760059, RMSE: 0.961415 \u2013 00:00:04 \u2013 935.54 KB\n[50,000] MAE: 0.751429, RMSE: 0.951504 \u2013 00:00:08 \u2013 1.06 MB\n[75,000] MAE: 0.750568, RMSE: 0.951592 \u2013 00:00:13 \u2013 1.22 MB\n[100,000] MAE: 0.75018, RMSE: 0.951622 \u2013 00:00:17 \u2013 1.37 MB\n

    Note that using more variables involves factorizing a larger latent space; increasing the number of latent factors \\(k\\) accordingly often helps capture more information.

    Some other feature engineering tips from the 3 idiots' winning solution for the Kaggle Criteo display ads competition in 2014:

    • Infrequent modalities often bring noise and little information; transforming them into a special tag can help
    • In some cases, sample-wise normalization seems to make the optimization problem easier to solve
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#higher-order-factorization-machines-hofm","title":"Higher-Order Factorization Machines (HOFM)","text":"

    The model equation generalized to any order \\(d \\geq 2\\) is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right) \\]
    hofm_params = {\n    'degree': 3,\n    'n_factors': 12,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.HOFMRegressor(**hofm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761379, RMSE: 0.96214 \u2013 00:00:16 \u2013 1.73 MB\n[50,000] MAE: 0.751998, RMSE: 0.951589 \u2013 00:00:32 \u2013 2.03 MB\n[75,000] MAE: 0.750994, RMSE: 0.951616 \u2013 00:00:48 \u2013 2.36 MB\n[100,000] MAE: 0.750849, RMSE: 0.952142 \u2013 00:01:04 \u2013 2.66 MB\n

    As said previously, higher-order interactions are often hard to estimate due to sparsity, which is why we won't spend too much time on them here.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#field-aware-factorization-machines-ffm","title":"Field-aware Factorization Machines (FFM)","text":"

    The field-aware variant of FM (FFM) improves the original method by adding the notion of \"fields\". A \"field\" is a group of features that belong to a specific domain (e.g. the \"users\" field, the \"items\" field, or the \"movie genres\" field).

    FFM restricts itself to pairwise interactions and factorizes separate latent spaces, one per combination of fields (e.g. users/items, users/movie genres, or items/movie genres), instead of a single one shared by all fields. Therefore, each feature has one latent vector per field it can interact with, so that it can learn the specific effect with each different field.

    The model equation is defined by:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_{j}} \\rangle x_{j} x_{j'} \\]

    Where \\(f_j\\) and \\(f_{j'}\\) are the fields of features \\(j\\) and \\(j'\\), respectively.
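
    To give an intuition, the latent vectors can be pictured as being indexed by a (feature, field) pair, so that the vector a user learns for interacting with items differs from the one it learns for interacting with genres. The following pure-Python sketch only illustrates the equation with made-up values; it is not River's internal representation:

    # Made-up latent vectors keyed by (feature, field of the other feature)\nv = {\n    ('user_259', 'items'): [0.1, 0.2],\n    ('item_255', 'users'): [0.3, -0.1],\n    ('user_259', 'genres'): [0.05, 0.4],\n    ('genre_comedy', 'users'): [-0.2, 0.1],\n}\n\ndef dot(a, b):\n    return sum(a_f * b_f for a_f, b_f in zip(a, b))\n\n# Each interaction uses the vectors learnt for the other feature's field\nuser_item = dot(v[('user_259', 'items')], v[('item_255', 'users')])\nuser_genre = dot(v[('user_259', 'genres')], v[('genre_comedy', 'users')])\n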

    ffm_params = {\n    'n_factors': 8,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FFMRegressor(**ffm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.758339, RMSE: 0.959047 \u2013 00:00:06 \u2013 2.16 MB\n[50,000] MAE: 0.749833, RMSE: 0.948531 \u2013 00:00:13 \u2013 2.54 MB\n[75,000] MAE: 0.749631, RMSE: 0.949418 \u2013 00:00:19 \u2013 2.96 MB\n[100,000] MAE: 0.749776, RMSE: 0.950131 \u2013 00:00:26 \u2013 3.35 MB\n

    Note that FFM usually needs to learn a smaller number of latent factors \\(k\\) than FM, as each latent vector only deals with one field.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#field-weighted-factorization-machines-fwfm","title":"Field-weighted Factorization Machines (FwFM)","text":"

    Field-weighted Factorization Machines (FwFM) address FFM's memory issues caused by its large number of parameters, which is on the order of the number of features times the number of fields. Like FFM, FwFM is an extension of FM restricted to pairwise interactions, but instead of factorizing separate latent spaces, it learns a specific weight \\(r_{f_j, f_{j'}}\\) for each field combination, modelling the interaction strength.

    The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'} \\]
    fwfm_params = {\n    'n_factors': 10,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'seed': 73,\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FwFMRegressor(**fwfm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761435, RMSE: 0.962211 \u2013 00:00:08 \u2013 834.1 KB\n[50,000] MAE: 0.754063, RMSE: 0.953248 \u2013 00:00:17 \u2013 964.01 KB\n[75,000] MAE: 0.754729, RMSE: 0.95507 \u2013 00:00:25 \u2013 1.08 MB\n[100,000] MAE: 0.755697, RMSE: 0.956542 \u2013 00:00:34 \u2013 1.21 MB\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-3/","title":"Part 3","text":"

    To do.

    "},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"faq/#do-all-classifiers-support-multi-class-classification","title":"Do all classifiers support multi-class classification?","text":"

    No, they don't. Although binary classification can be seen as a special case of multi-class classification, there are many optimizations that can be performed if we know that there are only two classes. It would be annoying to have to check whether this is the case in an online setting. All in all we find that separating both cases leads to much cleaner code. Note that the multiclass module contains wrapper models that enable you to perform multi-class classification with binary classifiers.

    "},{"location":"faq/#how-do-i-know-if-a-classifier-supports-multi-class-classification","title":"How do I know if a classifier supports multi-class classification?","text":"

    Each classifier in River inherits from the base.Classifier class. Each classifier therefore has a _multiclass property which indicates whether or not it can process a non-boolean target value.

    >>> from river import linear_model\n\n>>> classifier = linear_model.LogisticRegression()\n>>> classifier._multiclass\nFalse\n
    "},{"location":"faq/#why-doesnt-river-do-any-input-validation","title":"Why doesn't river do any input validation?","text":"

    Python encourages a coding style called EAFP, which stands for \"Easier to Ask for Forgiveness than Permission\". The idea is to assume that runtime errors don't occur, and instead use try/excepts to catch errors. The great benefit is that we don't have to drown our code in if statements, which is symptomatic of the LBYL style, which stands for \"Look Before You Leap\". This makes our implementations much more readable than, say, scikit-learn's, which does a lot of input validation. The catch is that users have to be careful to use sane inputs. As always, there is no free lunch!

    "},{"location":"faq/#what-about-reinforcement-learning","title":"What about reinforcement learning?","text":"

    Reinforcement learning works in an online manner because of the nature of the task. Reinforcement learning can therefore be seen as a special case of online machine learning. However, we prefer not to support it because there are already many existing open-source libraries dedicated to it.

    "},{"location":"faq/#what-are-the-differences-between-scikit-learns-online-learning-algorithm-which-have-a-partial_fit-method-and-their-equivalents-in-river","title":"What are the differences between scikit-learn's online learning algorithm which have a partial_fit method and their equivalents in River?","text":"

    The algorithms from sklearn that support incremental learning are mostly meant for mini-batch learning. In a pure streaming context, where observations arrive one by one, River is much faster than sklearn. This is mostly because sklearn incurs a lot of overhead by performing data checks. Also, sklearn assumes that you're always using the same number of features. This is not the case with River, because it uses dictionaries, which allow you to drop and add features as you wish.

    "},{"location":"faq/#how-do-i-save-and-load-models","title":"How do I save and load models?","text":"
    >>> from river import ensemble\n>>> import pickle\n\n>>> model = ensemble.AdaptiveRandomForestClassifier()\n\n# save\n>>> with open('model.pkl', 'wb') as f:\n...     pickle.dump(model, f)\n\n# load\n>>> with open('model.pkl', 'rb') as f:\n...     model = pickle.load(f)\n

    We also encourage you to try out dill and cloudpickle.

    "},{"location":"faq/#what-about-neural-networks","title":"What about neural networks?","text":"

    There are many great open-source libraries for building neural network models. We don't feel that we can bring anything of value to the existing Python ecosystem. However, we are open to implementing compatibility wrappers for popular libraries such as PyTorch and Keras.

    "},{"location":"faq/#who-are-the-authors-of-this-library","title":"Who are the authors of this library?","text":"

    We are research engineers, graduate students, PhDs, and machine learning researchers. The members of the development team are mainly located in France, Brazil and New Zealand.

    "},{"location":"introduction/basic-concepts/","title":"Basic concepts","text":"

    Here are some concepts to give you a feel for what problems River addresses.

    "},{"location":"introduction/basic-concepts/#data-streams","title":"Data streams","text":"

    River is a library to build online machine learning models. Such models operate on data streams. But a data stream is a bit of a vague concept.

    In general, a data stream is a sequence of individual elements. In the case of machine learning, each element is a bunch of features. We call these samples, or observations. Each sample might follow a fixed structure and always contain the same features. But features can also appear and disappear over time. That depends on the use case.
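
    As a small illustration, here are two consecutive samples from a hypothetical stream, where a new feature appears in the second one:

    sample_1 = {'temperature': 21.3, 'humidity': 0.42}\nsample_2 = {'temperature': 20.8, 'humidity': 0.45, 'wind_speed': 12.0}  # a feature appears\n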

    "},{"location":"introduction/basic-concepts/#reactive-and-proactive-data-streams","title":"Reactive and proactive data streams","text":"

    The origin of a data stream can vary, and usually it doesn't matter. You should be able to use River regardless of where your data comes from. It is however important to keep in mind the difference between reactive and proactive data streams.

    Reactive data streams are ones where the data comes to you. For instance, when a user visits your website, that's out of your control. You have no influence on the event. It just happens and you have to react to it.

    Proactive data streams are ones where you have control on the data stream. For example, you might be reading the data from a file. You decide at which speed you want to read the data, in what order, etc.

    If you consider data analysis as a whole, you'll realize that the general approach is to turn reactive streams into proactive datasets. Events are usually logged into a database and processed offline, be it for building KPIs or for training models.

    The challenge for machine learning is to ensure models you train offline on proactive datasets will perform correctly in production on reactive data streams.

    "},{"location":"introduction/basic-concepts/#online-processing","title":"Online processing","text":"

    Online processing is the act of processing a data stream one element at a time. In the case of machine learning, that means training a model by teaching it one sample at a time. This is the complete opposite of the traditional way of doing machine learning, which is to train a model on a whole batch of data at once.

    An online model is therefore a stateful, dynamic object. It keeps learning and doesn't have to revisit past data. It's a different way of doing things, and therefore has its own set of pros and cons.

    "},{"location":"introduction/basic-concepts/#tasks","title":"Tasks","text":"

    Machine learning encompasses many different tasks: classification, regression, anomaly detection, time series forecasting, etc. The ideology behind River is to be a generic machine learning library which allows these tasks to be performed in a streaming manner. Indeed, many batch machine learning algorithms have online equivalents.

    Note that River also supports some more basic tasks. For instance, you might just want to calculate a running average of a data stream. These are usually smaller parts of a whole stream processing pipeline.
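
    For instance, here is a minimal sketch of maintaining a running average with the stats module:

    from river import stats\n\nmean = stats.Mean()\nfor value in [5, 10, 6]:\n    mean.update(value)\n\nmean.get()  # 7.0\n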

    "},{"location":"introduction/basic-concepts/#dictionaries-everywhere","title":"Dictionaries everywhere","text":"

    River is a Python library. It is composed of a bunch of classes which implement various online processing algorithms. Most of these classes are machine learning models which can process a single sample, be it for learning or for inference.

    We made the choice to use dictionaries as the basic building block. First of all, online processing is different from batch processing, in that vectorization doesn't bring any speedup. Therefore numeric processing libraries such as numpy and PyTorch actually bring too much overhead. Using native Python data structures is faster.

    Dictionaries are therefore a perfect fit. They're native to Python and have excellent support in the standard library. They allow naming each feature. They can hold any kind of data type. They also allow transparent support of JSON payloads, enabling seamless integration with web apps.
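
    As a sketch of why this matters for web apps, an incoming JSON payload deserializes directly into a sample, with no conversion layer in between. The payload here is made up:

    import json\n\npayload = '{\"temperature\": 21.3, \"humidity\": 0.42}'  # hypothetical request body\nx = json.loads(payload)  # already a dict, ready for learn_one or predict_one\n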

    "},{"location":"introduction/basic-concepts/#datasets","title":"Datasets","text":"

    In production, you're almost always going to face data streams which you have to react to. Such as users visiting your website. The advantage of online machine learning is that you can design models which make predictions as well as learn from this data stream as it flows.

    But of course, when you're developing a model, you don't usually have access to a real-time feed on which to evaluate it. You usually have an offline dataset which you want to evaluate your model on. River provides some datasets which can be read in an online manner, one sample at a time. It is however crucial to keep in mind that the goal is to reproduce a production scenario as closely as possible, in order to ensure your model will perform just as well in production.

    "},{"location":"introduction/basic-concepts/#model-evaluation","title":"Model evaluation","text":"

    Online model evaluation differs from its traditional batch counterpart. In the latter, you usually perform cross-validation, whereby your training dataset is split into a learning and an evaluation dataset. This is fine, but it doesn't exactly reflect the data generation process that occurs in production.

    Online model evaluation involves learning and inference in the same order as what would happen in production. Indeed, if you know the order in which your data arrives, then you can process it the exact same order. This allows you to replay a production scenario and evaluate your model with higher fidelity than cross-validation.

    This is what makes online machine learning powerful. By replaying datasets in the correct order, you ensure you are designing models which will perform as expected in production.

    "},{"location":"introduction/basic-concepts/#concept-drift","title":"Concept drift","text":"

    The main reason why an offline model might not perform as expected in production is because of concept drift. But this is true for all machine learning models, be they offline or online.

    The advantage of online models over offline models is that they can cope with drift. Indeed, because they keep learning, they usually adapt to concept drift in a seamless manner, as opposed to batch models, which have to be retrained from scratch.

    "},{"location":"introduction/installation/","title":"Installation","text":"

    River is meant to work with Python 3.8 and above. Installation can be done via pip:

    pip install river\n

    You can install the latest development version from GitHub, as so:

    pip install git+https://github.com/online-ml/river --upgrade\n

    Or, through SSH:

    pip install git+ssh://git@github.com/online-ml/river.git --upgrade\n

    Feel welcome to open an issue on GitHub if you are having any trouble.

    "},{"location":"introduction/next-steps/","title":"Next steps","text":"

    The Recipes 🍱 section is made up of small tutorials. Each one explains how to perform mundane tasks, such as measuring the performance of a model, selecting hyperparameters, etc.

    The Examples 🌶️ section contains more involved notebooks with fewer explanations. Each notebook addresses a particular machine learning problem.

    The API 📚 section references all the modules, classes, and functions in River. It is automatically generated from the codebase's Python docstrings.

    Feel welcome to open a discussion if you have a question. Before that you can check out the FAQ 🙋, which has answers to recurring questions.

    The released versions are listed in the Releases 🏗 section. Changes that will be part of the next release are listed in the unreleased section of the documentation's development version, which you may find here.

    We recommend checking out Awesome Online Machine Learning if you want to go deeper. There you will find online machine learning related content: research papers, alternative and complementary software, blog posts, etc.

    "},{"location":"introduction/related-projects/","title":"Related projects","text":"

    Here is a list of projects which are more or less coupled with River:

    • deep-river interfaces PyTorch models with River.
    • light-river implements fast algorithms in Rust.
    • river-extra regroups experimental features which have yet to prove themselves to make it into the main River repository. Between us we call this \"the arena\".
    • Beaver is an MLOps tool for covering the whole lifecycle of online machine learning models.
    "},{"location":"introduction/why-use-river/","title":"Why use River?","text":""},{"location":"introduction/why-use-river/#processing-one-sample-at-a-time","title":"Processing one sample at a time","text":"

    All the tools in the library can be updated with a single observation at a time. They can therefore be used to process streaming data. Depending on your use case, this might be more convenient than using a batch model.

    "},{"location":"introduction/why-use-river/#adapting-to-drift","title":"Adapting to drift","text":"

    In the streaming setting, data can evolve. Adaptive methods are specifically designed to be robust against concept drift in dynamic environments. Many of River's models can cope with concept drift.

    "},{"location":"introduction/why-use-river/#general-purpose","title":"General purpose","text":"

    River supports different machine learning tasks, including regression, classification, and unsupervised learning. It can also be used for ad hoc tasks, such as computing online metrics, as well as for concept drift detection.

    "},{"location":"introduction/why-use-river/#user-experience","title":"User experience","text":"

    River is not the only library allowing you to do online machine learning. But it might just be the simplest one to use in the Python ecosystem. River plays nicely with Python dictionaries, making it easy to use in the context of web applications where JSON payloads are aplenty.

    "},{"location":"introduction/getting-started/binary-classification/","title":"Binary classification","text":"

    Classification is about predicting an outcome from a fixed list of classes. The prediction is a probability distribution that assigns a probability to each possible outcome.

    A labeled classification sample is made up of a bunch of features and a class. The class is a boolean in the case of binary classification. We'll use the phishing dataset as an example.

    from river import datasets\n\ndataset = datasets.Phishing()\ndataset\n
    Phishing websites.\n\nThis dataset contains features from web pages that are classified as phishing or not.\n\n    Name  Phishing                                                          \n    Task  Binary classification                                             \n Samples  1,250                                                             \nFeatures  9                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/phishing.csv.gz\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'empty_server_form_handler': 0.0,\n 'popup_window': 0.0,\n 'https': 0.0,\n 'request_from_other_domain': 0.0,\n 'anchor_from_other_domain': 0.0,\n 'is_popular': 0.5,\n 'long_url': 1.0,\n 'age_of_domain': 1,\n 'ip_in_url': 1}\n
    y\n
    True\n

    A binary classifier's goal is to learn to predict a binary target y from some given features x. We'll try to do this with a logistic regression.

    from river import linear_model\n\nmodel = linear_model.LogisticRegression()\nmodel.predict_proba_one(x)\n
    {False: 0.5, True: 0.5}\n

    The model hasn't been trained on any data, and therefore outputs a default probability of 50% for each class.

    The model can be trained on the sample, which will update the model's state.

    model = model.learn_one(x, y)\n

    If we try to make a prediction on the same sample, we can see that the probabilities are different, because the model has learned something.

    model.predict_proba_one(x)\n
    {False: 0.494687699901455, True: 0.505312300098545}\n

    Note that there is also a predict_one method if you're only interested in the most likely class rather than the probability distribution.

    model.predict_one(x)\n
    True\n

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = linear_model.LogisticRegression()\n\nmetric = metrics.ROCAUC()\n\nfor x, y in dataset:\n    y_pred = model.predict_proba_one(x)\n    model.learn_one(x, y)\n    metric.update(y, y_pred)\n\nmetric\n
    ROCAUC: 89.36%\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = linear_model.LogisticRegression()\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    ROCAUC: 89.36%\n

    A common way to improve the performance of a logistic regression is to scale the data. This can be done by using a preprocessing.StandardScaler. In particular, we can define a pipeline to organise our model into a sequence of steps:

    from river import compose\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LogisticRegression()\n)\n\nmodel\n
    StandardScaler
    StandardScaler ( with_std=True )
    LogisticRegression
    LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )
    metric = metrics.ROCAUC()\nevaluate.progressive_val_score(dataset, model, metric)\n
    ROCAUC: 95.07%\n
    "},{"location":"introduction/getting-started/concept-drift-detection/","title":"Concept drift","text":"

    In online machine learning, it is assumed that data can change over time. When building machine learning models, we assume the data follows a probability distribution, which is usually taken to be fixed, i.e., stationary. Changes in the data distribution give rise to the phenomenon called concept drift. Such drifts can be either virtual or real. In virtual drifts, only the distribution of the features, \(P(X)\), changes, whereas the relationship between the features \(X\) and the target \(y\) remains unchanged. In real concept drifts, the joint probability \(P(X, y)\) changes. Consequently, unsupervised online machine learning problems can only be affected by virtual concept drifts.
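    To make the distinction concrete, here is a minimal sketch (using numpy; the labelling rules are purely illustrative) of what virtual and real drifts look like on a single feature:

    import numpy as np\n\nrng = np.random.RandomState(42)\n\n# An illustrative, fixed labelling rule: y = 1 when x > 0.5\nlabel = lambda x: (x > 0.5).astype(int)\n\n# Virtual drift: P(X) shifts, but the rule linking X and y is unchanged\nx_before = rng.normal(0.3, 0.1, 1000)\nx_after = rng.normal(0.7, 0.1, 1000)\ny_before, y_after = label(x_before), label(x_after)\n\n# Real drift: P(X) stays put, but the labelling rule itself changes,\n# so the joint distribution P(X, y) changes too\nnew_label = lambda x: (x <= 0.5).astype(int)\ny_real_drift = new_label(x_before)\n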

    Real concept drifts can be further divided into abrupt drifts (the change happens instantly at a given point) and gradual drifts (one "concept" progressively changes into another). Other categorisations exist, but they can generally be mapped onto abrupt or gradual drifts.

    "},{"location":"introduction/getting-started/concept-drift-detection/#examples-of-concept-drift","title":"Examples of concept drift","text":"

    Concept drift may occur in electricity demand across the year, in the stock market, in buying preferences, and in the likelihood of a new movie's success, among other examples.

    Let us consider the movie example: two movies made in different epochs can have similar features such as famous actors/directors, storyline, production budget, marketing campaigns, etc., yet it is not certain that both will be similarly successful. What the target audience considers worth watching (and worth their money) is constantly changing, and production companies must adapt accordingly to avoid "box office flops".

    Prior to the COVID-19 pandemic, the usage of hand sanitizers and facial masks was not widespread. When cases started increasing, there was a shortage of such products for end consumers. Imagine a batch-learning model deciding how much of each product a supermarket should stock during those times. What a mess!

    "},{"location":"introduction/getting-started/concept-drift-detection/#impact-of-drift-on-learning","title":"Impact of drift on learning","text":"

    Concept drift can have a significant impact on predictive performance if not handled properly. Most batch learning models will fail in the presence of concept drift as they are essentially trained on different data. On the other hand, stream learning methods continuously update themselves and adapt to new concepts. Furthermore, drift-aware methods use change detection methods (a.k.a. drift detectors) to trigger mitigation mechanisms if a change in performance is detected.

    "},{"location":"introduction/getting-started/concept-drift-detection/#detecting-concept-drift","title":"Detecting concept drift","text":"

    Multiple drift detection methods have been proposed. The goal of a drift detector is to signal an alarm in the presence of drift. A good drift detector maximizes the number of true positives while keeping the number of false positives to a minimum. It must also be resource-efficient in order to work in the context of infinite data streams.

    For this example, we will generate a synthetic data stream by concatenating 3 distributions of 1000 samples each:

    • \\(dist_a\\): \\(\\mu=0.8\\), \\(\\sigma=0.05\\)
    • \\(dist_b\\): \\(\\mu=0.4\\), \\(\\sigma=0.02\\)
    • \\(dist_c\\): \\(\\mu=0.6\\), \\(\\sigma=0.1\\).
    import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\n# Generate data for 3 distributions\nrandom_state = np.random.RandomState(seed=42)\ndist_a = random_state.normal(0.8, 0.05, 1000)\ndist_b = random_state.normal(0.4, 0.02, 1000)\ndist_c = random_state.normal(0.6, 0.1, 1000)\n\n# Concatenate data to simulate a data stream with 2 drifts\nstream = np.concatenate((dist_a, dist_b, dist_c))\n\n# Auxiliary function to plot the data\ndef plot_data(dist_a, dist_b, dist_c, drifts=None):\n    fig = plt.figure(figsize=(7,3), tight_layout=True)\n    gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])\n    ax1, ax2 = plt.subplot(gs[0]), plt.subplot(gs[1])\n    ax1.grid()\n    ax1.plot(stream, label='Stream')\n    ax2.grid(axis='y')\n    ax2.hist(dist_a, label=r'$dist_a$')\n    ax2.hist(dist_b, label=r'$dist_b$')\n    ax2.hist(dist_c, label=r'$dist_c$')\n    if drifts is not None:\n        for drift_detected in drifts:\n            ax1.axvline(drift_detected, color='red')\n    plt.show()\n\nplot_data(dist_a, dist_b, dist_c)\n

    "},{"location":"introduction/getting-started/concept-drift-detection/#drift-detection-test","title":"Drift detection test","text":"

    We will use the ADaptive WINdowing (ADWIN) drift detection method. Remember that the goal is to indicate that drift has occurred after samples 1000 and 2000 in the synthetic data stream.

    from river import drift\n\ndrift_detector = drift.ADWIN()\ndrifts = []\n\nfor i, val in enumerate(stream):\n    drift_detector.update(val)   # Data is processed one sample at a time\n    if drift_detector.drift_detected:\n        # The drift detector indicates after each sample if there is a drift in the data\n        print(f'Change detected at index {i}')\n        drifts.append(i)\n\nplot_data(dist_a, dist_b, dist_c, drifts)\n
    Change detected at index 1055\nChange detected at index 2079\n

    We see that ADWIN successfully indicates the presence of drift (red vertical lines) close to the beginning of each new data distribution.

    We conclude this example with some remarks regarding concept drift detectors and their usage:

    • In practice, drift detectors provide stream learning methods with robustness against concept drift. Drift detectors usually monitor the model through a performance metric (a sketch of this is given after this list).
    • Drift detectors work on univariate data. This is why they are used to monitor a model's performance and not the data itself. Remember that concept drift is defined as a change in the relationship between the data and the target to learn (in supervised learning).
    • Drift detectors define their expectations regarding input data. It is important to know these expectations in order to feed a given drift detector with the correct data.
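    As an illustration of the first two remarks, here is a minimal sketch of monitoring a model's errors rather than its raw inputs. The detector receives a univariate 0/1 error signal; the choice of model and dataset here is arbitrary:

    from river import datasets, drift, linear_model\n\nmodel = linear_model.LogisticRegression()\ndetector = drift.ADWIN()\n\nfor i, (x, y) in enumerate(datasets.Phishing()):\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    # The detector monitors a univariate signal: 1 for a mistake, 0 otherwise\n    detector.update(int(y_pred != y))\n    if detector.drift_detected:\n        print(f'Drift in the error rate detected at sample {i}')\n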
    "},{"location":"introduction/getting-started/multiclass-classification/","title":"Multi-class classification","text":"

    Classification is about predicting an outcome from a fixed list of classes. The prediction is a probability distribution that assigns a probability to each possible outcome.

    A labeled classification sample is made up of a bunch of features and a class. In the case of multiclass classification, the class is usually a string or a number. We'll use the image segments dataset as an example.

    from river import datasets\n\ndataset = datasets.ImageSegments()\ndataset\n
    Image segments classification.\n\nThis dataset contains features that describe image segments into 7 classes: brickface, sky,\nfoliage, cement, window, path, and grass.\n\n    Name  ImageSegments                                                     \n    Task  Multi-class classification                                        \n Samples  2,310                                                             \nFeatures  18                                                                \n Classes  7                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/segment.csv.zip\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'region-centroid-col': 218,\n 'region-centroid-row': 178,\n 'short-line-density-5': 0.11111111,\n 'short-line-density-2': 0.0,\n 'vedge-mean': 0.8333326999999999,\n 'vegde-sd': 0.54772234,\n 'hedge-mean': 1.1111094,\n 'hedge-sd': 0.5443307,\n 'intensity-mean': 59.629630000000006,\n 'rawred-mean': 52.44444300000001,\n 'rawblue-mean': 75.22222,\n 'rawgreen-mean': 51.22222,\n 'exred-mean': -21.555555,\n 'exblue-mean': 46.77778,\n 'exgreen-mean': -25.222220999999998,\n 'value-mean': 75.22222,\n 'saturation-mean': 0.31899637,\n 'hue-mean': -2.0405545}\n
    y\n
    'path'\n

    A multiclass classifier's goal is to learn how to predict a class y from a bunch of features x. We'll attempt to do this with a decision tree.

    from river import tree\n\nmodel = tree.HoeffdingTreeClassifier()\nmodel.predict_proba_one(x)\n
    {}\n

    The output dictionary is empty because the model hasn't seen any data yet. It isn't aware of the dataset whatsoever. If this were a binary classifier, it would output a probability of 50% for True and False, because those classes are implicit. But in this case we're doing multiclass classification, so the model can't anticipate the classes in advance.

    Likewise, the predict_one method initially returns None because the model hasn't seen any labeled data yet.

    print(model.predict_one(x))\n
    None\n

    If we update the model and try again, then we see that a probability of 100% is assigned to the 'path' class because that's the only one the model is aware of.

    model.learn_one(x, y)\nmodel.predict_proba_one(x)\n
    {'path': 1.0}\n

    This is a strength of online classifiers: they're able to deal with new classes appearing in the data stream.
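    For instance, here is a quick sketch of what happens once the model encounters a sample from another class (reusing the model and dataset from above); the predicted distribution now spreads over two classes:

    for x2, y2 in dataset:\n    if y2 != 'path':\n        break  # grab the first sample whose class isn't 'path'\n\nmodel.learn_one(x2, y2)\nmodel.predict_proba_one(x)\n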

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = tree.HoeffdingTreeClassifier()\n\nmetric = metrics.ClassificationReport()\n\nfor x, y in dataset:\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    if y_pred is not None:\n        metric.update(y, y_pred)\n\nmetric\n
                   Precision      Recall         F1             Support\n\n   brickface         77.13%         84.85%         80.81%            330  \n      cement         78.92%         83.94%         81.35%            330  \n     foliage         65.69%         20.30%         31.02%            330  \n       grass        100.00%         96.97%         98.46%            330  \n        path         90.63%         91.19%         90.91%            329  \n         sky         99.08%         98.18%         98.63%            330  \n      window         43.50%         67.88%         53.02%            330\n\n       Macro         79.28%         77.62%         76.31%                 \n       Micro         77.61%         77.61%         77.61%                 \n    Weighted         79.27%         77.61%         76.31%\n\n                             77.61% accuracy\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = tree.HoeffdingTreeClassifier()\nmetric = metrics.ClassificationReport()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
                   Precision      Recall         F1             Support\n\n   brickface         77.13%         84.85%         80.81%            330  \n      cement         78.92%         83.94%         81.35%            330  \n     foliage         65.69%         20.30%         31.02%            330  \n       grass        100.00%         96.97%         98.46%            330  \n        path         90.63%         91.19%         90.91%            329  \n         sky         99.08%         98.18%         98.63%            330  \n      window         43.50%         67.88%         53.02%            330\n\n       Macro         79.28%         77.62%         76.31%                 \n       Micro         77.61%         77.61%         77.61%                 \n    Weighted         79.27%         77.61%         76.31%\n\n                             77.61% accuracy\n
    "},{"location":"introduction/getting-started/regression/","title":"Regression","text":"

    Regression is about predicting a numeric output for a given sample. A labeled regression sample is made up of a bunch of features and a number. The number is usually continuous, but it may also be discrete. We'll use the Trump approval rating dataset as an example.

    from river import datasets\n\ndataset = datasets.TrumpApproval()\ndataset\n
    Donald Trump approval ratings.\n\nThis dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald\nTrump's approval ratings. It contains 5 features, which are approval ratings collected by\n5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of\nthis task is to see if we can reproduce FiveThirtyEight's model.\n\n    Name  TrumpApproval                                                           \n    Task  Regression                                                              \n Samples  1,001                                                                   \nFeatures  6                                                                       \n  Sparse  False                                                                   \n    Path  /Users/max/projects/online-ml/river/river/datasets/trump_approval.csv.gz\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'ordinal_date': 736389,\n 'gallup': 43.843213,\n 'ipsos': 46.19925042857143,\n 'morning_consult': 48.318749,\n 'rasmussen': 44.104692,\n 'you_gov': 43.636914000000004}\n

    A regression model's goal is to learn to predict a numeric target y from a bunch of features x. We'll attempt to do this with a nearest neighbors model.

    from river import neighbors\n\nmodel = neighbors.KNNRegressor()\nmodel.predict_one(x)\n
    0.0\n

    The model hasn't been trained on any data, and therefore outputs a default value of 0.

    The model can be trained on the sample, which will update the model's state.

    model = model.learn_one(x, y)\n

    If we try to make a prediction on the same sample, we can see that the output is different, because the model has learned something.

    model.predict_one(x)\n
    43.75505\n

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = neighbors.KNNRegressor()\n\nmetric = metrics.MAE()\n\nfor x, y in dataset:\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    metric.update(y, y_pred)\n\nmetric\n
    MAE: 0.310353\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = neighbors.KNNRegressor()\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.310353\n
    "},{"location":"recipes/active-learning/","title":"Active learning","text":"

    Active learning is a training regime where the goal is to fit a model on the most discriminative samples. It is usually applied in situations where a limited amount of labeled data is available. In such cases, a human might be asked to annotate a sample. Doing this is expensive, so it's important to ask for labels for the samples that will have the most impact.

    Online active learning is active learning done in a streaming fashion. Every time a prediction is made, an active learning strategy decides whether a label should be asked for or not. If the strategy decides yes, then the system could ask a human to intervene. This is well summarized in the schema from Online Active Learning Methods for Fast Label-Efficient Spam Filtering.

    "},{"location":"recipes/active-learning/#online-active-learning","title":"Online active learning","text":"

    River's online active learning strategies are located in the active module. The latter contains wrapper models. These wrappers enrich the predict_one and predict_proba_one methods to include a boolean in the output.

    The returned boolean indicates whether or not a label should be asked for. In a production system, we could feed this to a web interface, and get the human to annotate the sample. Offline, we can simply use the label in the dataset.

    We'll implement this basic flow. We'll apply a TFIDF followed by a logistic regression to a dataset of spam/ham SMS messages.

    from river import active\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.SMSSpam()\nmetric = metrics.Accuracy()\nmodel = (\n    feature_extraction.TFIDF(on='body') |\n    linear_model.LogisticRegression()\n)\nmodel = active.EntropySampler(model, seed=42)\n\nn_samples_used = 0\nfor x, y in dataset:\n    y_pred, ask = model.predict_one(x)\n    metric.update(y, y_pred)\n    if ask:\n        n_samples_used += 1\n        model.learn_one(x, y)\n\nmetric\n
    Accuracy: 86.60%\n

    The performance is reasonable, even though not all of the dataset was used for training. We can check how many samples were actually used.

    print(f\"{n_samples_used} / {dataset.n_samples} = {n_samples_used / dataset.n_samples:.2%}\")\n
    1921 / 5574 = 34.46%\n

    Note that the above logic can be succinctly reproduced with the progressive_val_score function from the evaluate module. It recognises when an active learning model is provided, and will automatically display the number of samples used.

    from river import evaluate\n\nevaluate.progressive_val_score(\n    dataset=dataset,\n    model=model.clone(),\n    metric=metric.clone(),\n    print_every=1000\n)\n
    [1,000] Accuracy: 86.32% \u2013 661 samples used\n[2,000] Accuracy: 86.44% \u2013 1,057 samples used\n[3,000] Accuracy: 86.52% \u2013 1,339 samples used\n[4,000] Accuracy: 86.62% \u2013 1,568 samples used\n[5,000] Accuracy: 86.57% \u2013 1,790 samples used\n[5,574] Accuracy: 86.60% \u2013 1,921 samples used\n\nAccuracy: 86.60%\n
    "},{"location":"recipes/active-learning/#reduce-training-time","title":"Reduce training time","text":"

    Active learning is primarily used to label data in an efficient manner. However, in an online setting, active learning can also be used simply to speed up training. The point is that you can achieve very good performance without training on an entire dataset. Active learning is a powerful way to decide which samples to train on.

    "},{"location":"recipes/active-learning/#_1","title":"Active learning","text":""},{"location":"recipes/active-learning/#production-considerations","title":"Production considerations","text":"

    In production, you might want to deploy a system where humans may annotate samples queried by an active learning strategy. You have several options at your disposal, all of which go beyond the scope of River.

    The general idea is to have some kind of queue into which queried samples are fed. You would then have a user interface which displays the elements in the queue one by one. Each time a sample is labeled, the label would be used to update the model. You might have one or more threads/processes doing inference, and you'll want to propagate each model update to every one of them.
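    As a rough sketch of this idea (the function names are hypothetical and this glue code is not part of River), the annotation queue could look like this:

    import queue\n\n# Samples waiting for a human label\nto_label = queue.Queue()\n\ndef serve_prediction(model, x):\n    y_pred, ask = model.predict_one(x)\n    if ask:\n        to_label.put(x)  # the annotation interface consumes this queue\n    return y_pred\n\ndef on_label_received(model, x, y):\n    # Called by the annotation interface once a label arrives\n    model.learn_one(x, y)\n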

    "},{"location":"recipes/bandits-101/","title":"Multi-armed bandits","text":"

    River has a bandit module. It contains several multi-armed bandit policies, bandit environments, and utilities to benchmark policies on bandit problems.

    Bandit environments in River implement the Gym interface. You can thus load them with gym.make. Note that Gym is intended for reinforcement learning algorithms, while bandit policies are the simplest form of reinforcement learning. Bandit policies learn by receiving a reward after each step, whereas reinforcement learning algorithms may have to learn from feedback that only arrives at the end of a (long) sequence of steps.

    import gym\n\nfor k in gym.envs.registry:\n    if k.startswith('river_bandits'):\n        print(k)\n

    River's bandit module offers the bandit.evaluate function to benchmark several policies on a given environment. It takes as input a list of bandit policies, a bandit environment (the problem to solve), and a reward object.

    import gym\nfrom river import bandit\nimport pandas as pd\nfrom tqdm import tqdm\nfrom river import stats\n\npolicies=[\n    bandit.EpsilonGreedy(epsilon=0.1),\n    bandit.EpsilonGreedy(epsilon=0.01),\n    bandit.EpsilonGreedy(epsilon=0),\n]\n\nenv = gym.make(\n    'river_bandits/KArmedTestbed-v0',\n    max_episode_steps=1000\n)\n\ntrace = bandit.evaluate(\n    policies=policies,\n    env=env,\n    reward_stat=stats.Mean(),\n    n_episodes=(n_episodes := 2000),\n)\n

    The bandit.evaluate function returns a generator containing the results at each step of the benchmark. This can be wrapped with a pandas.DataFrame to gather all the results.

    trace_df = pd.DataFrame(tqdm(\n    trace, position=0, total=(\n        n_episodes *\n        len(policies) *\n        env._max_episode_steps\n    )\n))\ntrace_df.sample(5, random_state=42)\n
      0%|                                               | 0/6000000 [00:00<?, ?it/s]/Users/max/.pyenv/versions/3.10/envs/river310/lib/python3.10/site-packages/gym/utils/passive_env_checker.py:233: DeprecationWarning: `np.bool8` is a deprecated alias for `np.bool_`.  (Deprecated NumPy 1.24)\n  if not isinstance(terminated, (bool, np.bool8)):\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 6000000/6000000 [00:35<00:00, 169408.20it/s]\n
             episode  step  policy_idx  arm    reward  reward_stat\n1324896      441   632           0    4 -0.036801     0.457068\n3566176     1188   725           1    5  1.837321     2.220956\n1109043      369   681           0    6  0.616991     1.324610\n4286042     1428   680           2    3  0.236458     0.570999\n5395174     1798   391           1    1 -0.851223     0.446835\n

    It is then straightforward to plot the average reward each policy obtains at each step, by averaging over episodes.

    policy_names = {\n    0: '\u03b5 = 0.1',\n    1: '\u03b5 = 0.01',\n    2: '\u03b5 = 0 (greedy)'\n}\n\ncolors = {\n    '\u03b5 = 0.1': 'tab:blue',\n    '\u03b5 = 0.01': 'tab:red',\n    '\u03b5 = 0 (greedy)': 'tab:green'\n}\n\n(\n    trace_df\n    .assign(policy=trace_df.policy_idx.map(policy_names))\n    .groupby(['step', 'policy'])\n    ['reward'].mean()\n    .unstack()\n    .plot(color=colors)\n)\n
    <Axes: xlabel='step'>\n

    "},{"location":"recipes/bandits-101/#controlling-the-evaluation-loop","title":"Controlling the evaluation loop","text":"

    The bandit.evaluate function is useful for benchmarking. But in practice, you'll want to have control over your bandit policy. Indeed you'll want the freedom to pull arms (with the pull method) and update the policy (with the update method) at your discretion.

    As an example, the following is a possible reimplementation of the bandit.evaluate function. Here we'll be measuring the rate at which each policy selects the optimal arm.

    Note how the pull and update methods are used.

    import copy\n\npolicies=[\n    bandit.EpsilonGreedy(epsilon=0.1),\n    bandit.EpsilonGreedy(epsilon=0.01),\n    bandit.EpsilonGreedy(epsilon=0),\n]\n\nenv = gym.make(\n    'river_bandits/KArmedTestbed-v0',\n    max_episode_steps=1000\n)\nn_episodes = 2000\n\ntrace = []\n\nwith tqdm(total=len(policies) * n_episodes * env._max_episode_steps, position=0) as progress:\n    for policy in policies:\n        for episode in range(n_episodes):\n            episode_policy = policy.clone()\n            episode_env = copy.deepcopy(env)\n            episode_env.reset()\n            step = 0\n            while True:\n                action = episode_policy.pull(range(episode_env.action_space.n))\n                observation, reward, terminated, truncated, info = episode_env.step(action)\n                best_action = observation\n                episode_policy.update(action, reward)\n\n                trace.append({\n                    \"episode\": episode,\n                    \"step\": step,\n                    \"policy\": f\"\u03b5 = {policy.epsilon}\",\n                    \"is_action_optimal\": action == best_action\n                })\n                step += 1\n                progress.update()\n\n                if terminated or truncated:\n                    break\n\ntrace_df = pd.DataFrame(trace)\n
      0%|                                               | 0/6000000 [00:00<?, ?it/s]/Users/max/.pyenv/versions/3.10/envs/river310/lib/python3.10/site-packages/gym/utils/passive_env_checker.py:233: DeprecationWarning: `np.bool8` is a deprecated alias for `np.bool_`.  (Deprecated NumPy 1.24)\n  if not isinstance(terminated, (bool, np.bool8)):\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 6000000/6000000 [00:34<00:00, 172261.07it/s]\n
    colors = {\n    '\u03b5 = 0.1': 'tab:blue',\n    '\u03b5 = 0.01': 'tab:red',\n    '\u03b5 = 0': 'tab:green'\n}\n\n(\n    trace_df\n    .groupby(['step', 'policy'])\n    ['is_action_optimal'].mean()\n    .unstack()\n    .plot(color=colors)\n)\n
    <Axes: xlabel='step'>\n

    "},{"location":"recipes/bandits-101/#handling-drift","title":"Handling drift","text":"

    The environment used above is a toy situation used for introducing bandits. It is stationary, meaning that the expected reward of each arm does not change over time.

    In practice, arms are dynamic, and their performance can vary over time. A simple example of this is the Candy Cane Contest that was hosted on Kaggle in 2020. The expected reward of each arm diminishes each time it is pulled.

    The way bandit policies in River deal with drift depends on the method. For the bandit.EpsilonGreedy policy, it makes sense to use a rolling average as the reward object. What this means is that the empirical reward the policy calculates for each arm is a rolling average, rather than a global one.

    from river import proba, utils\n\npolicies=[\n    bandit.EpsilonGreedy(\n        epsilon=0.1,\n        seed=42\n    ),\n    bandit.EpsilonGreedy(\n        epsilon=0.3,\n        reward_obj=utils.Rolling(stats.Mean(), window_size=50),\n        seed=42\n    ),\n    bandit.ThompsonSampling(\n        reward_obj=proba.Beta(),\n        seed=42\n    )\n]\n\nenv = gym.make('river_bandits/CandyCaneContest-v0')\n\ntrace = bandit.evaluate(\n    policies=policies,\n    env=env,\n    n_episodes=(n_episodes := 30),\n    seed=42\n)\n\ntrace_df = pd.DataFrame(tqdm(\n    trace, position=0, total=(\n        n_episodes *\n        len(policies) *\n        env._max_episode_steps\n    )\n))\n
    100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 180000/180000 [00:14<00:00, 12305.42it/s]\n

    We can compare the performance of each policy by checking the average reward at the end of each episode.

    (\n    trace_df\n    .groupby(['policy_idx', 'episode'])\n    .last()\n    .groupby('policy_idx')\n    .reward_stat.mean()\n)\n
    policy_idx\n0    736.1\n1    817.0\n2    854.0\nName: reward_stat, dtype: float64\n

    We see that using a rolling average gives a boost to the epsilon-greedy strategy. However, the bandit.ThompsonSampling policy performs even better, even though no particular care was given to drift. A natural next step would thus be to see how it could be improved to handle drift. For instance, its reward_obj parameter could be wrapped with a utils.Rolling:

    policy = bandit.ThompsonSampling(\n    reward_obj=utils.Rolling(proba.Beta(), window_size=50),\n    seed=42\n)\n

    Bandits can be used for several tasks. They can be used for content personalization, as well as online model selection (see model_selection.BanditRegressor). The policies in River are therefore designed to be flexible, so that they can be used in conjunction with other River modules. For instance, the reward_obj in bandit.EpsilonGreedy can be a metric, a probability distribution, or a statistic. This works because objects in River adhere to a coherent get/update interface.
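    For instance, here is a small sketch of that interface at work with a running statistic, which could just as well be a metric or a probability distribution:

    from river import stats\n\n# Any object exposing update/get can act as a reward tracker\nreward = stats.Mean()\nfor r in [1, 0, 1, 1]:\n    reward.update(r)\n\nreward.get()  # 0.75\n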

    "},{"location":"recipes/cloning-and-mutating/","title":"Cloning and mutating","text":"

    Sometimes you might want to reset a model, or edit (what we call mutate) its attributes. This can be useful in an online environment. Indeed, if you detect a drift, then you might want to mutate a model's attributes. Or if you see that a model's performance is plummeting, then you might want to reset it to its "factory settings".

    Anyway, this is not meant to convince you, but rather to say that a model's attributes don't have to be set in stone throughout its lifetime. In particular, if you're developing your own model, then you might want to have good tools to do this. That is what this recipe is about.

    "},{"location":"recipes/cloning-and-mutating/#cloning","title":"Cloning","text":"

    The first thing you can do is clone a model. This creates a deep copy of the model. The resulting model is entirely independent of the original model. The clone is fresh, in the sense that it is as if it hasn't seen any data.

    For instance, say you have a linear regression model which you have trained on some data.

    from river import datasets, linear_model, optim, preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(\n        optimizer=optim.SGD(3e-2)\n    )\n)\n\nfor x, y in datasets.TrumpApproval():\n    model.predict_one(x)\n    model.learn_one(x, y)\n\nmodel[-1].weights\n
    {'ordinal_date': 20.59955380229643,\n 'gallup': 0.39114944304212645,\n 'ipsos': 0.4101918314868111,\n 'morning_consult': 0.12042970179504908,\n 'rasmussen': 0.18951231512561392,\n 'you_gov': 0.04991712783831687}\n

    For whatever reason, we may want to clone this model. This can be done with the clone method.

    clone = model.clone()\nclone[-1].weights\n
    {}\n

    As we can see, there are no weights because the clone is a fresh copy that has not seen any data. However, the learning rate we specified is preserved.

    clone[-1].optimizer.learning_rate\n
    0.03\n

    You may also specify parameters you want changed. For instance, let's say we want to clone the model, but we want to change the optimizer:

    clone = model.clone({\"LinearRegression\": {\"optimizer\": optim.Adam()}})\nclone[-1].optimizer\n
    Adam({'lr': Constant({'learning_rate': 0.1}), 'n_iterations': 0, 'beta_1': 0.9, 'beta_2': 0.999, 'eps': 1e-08, 'm': None, 'v': None})\n

    The first key indicates that we want to specify a different parameter for the LinearRegression part of the pipeline. Then the second key accesses the linear regression's optimizer parameter.

    Finally, note that the clone method isn't reserved to models. Indeed, every object in River has it. That's because they all inherit from the Base class in the base module.

    "},{"location":"recipes/cloning-and-mutating/#mutating-attributes","title":"Mutating attributes","text":"

    Cloning a model can be useful, but the fact that it essentially resets the model may not be desired. Instead, you might want to change an attribute while preserving the model's state. For example, let's change the l2 attribute, and the optimizer's lr attribute.

    model.mutate({\n    \"LinearRegression\": {\n        \"l2\": 0.1,\n        \"optimizer\": {\n            \"lr\": optim.schedulers.Constant(25e-3)\n        }\n    }\n})\n\nprint(repr(model))\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.025\n      )\n    )\n    loss=Squared ()\n    l2=0.1\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    We can see the attributes we specified have changed. However, the model's state is preserved:

    model[-1].weights\n
    {'ordinal_date': 20.59955380229643,\n 'gallup': 0.39114944304212645,\n 'ipsos': 0.4101918314868111,\n 'morning_consult': 0.12042970179504908,\n 'rasmussen': 0.18951231512561392,\n 'you_gov': 0.04991712783831687}\n

    In other words, the mutate method does not create a deep copy of the model. It just sets attributes. At this point you may ask:

    Why can't I just change the attribute directly, without calling mutate?

    The answer is that you're free to proceed as such, but it's not what we recommend. The mutate method is safer, in that it prevents you from mutating attributes you shouldn't be touching. We call these immutable attributes. For instance, there's no reason you should be modifying the weights.

    try:\n    model.mutate({\n        \"LinearRegression\": {\n            \"weights\": \"this makes no sense\"\n        }\n    })\nexcept ValueError as e:\n    print(e)\n
    'weights' is not a mutable attribute of LinearRegression\n

    All attributes are immutable by default. Under the hood, each model can specify a set of mutable attributes via the _mutable_attributes property. In theory this can be overridden. But the general idea is that we will progressively add more and more mutable attributes over time.
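    If you're curious, you can peek at which attributes a given model declares as mutable; this is just an inspection sketch, and the exact contents depend on the model and on the River version:

    from river import linear_model\n\nlinear_model.LinearRegression()._mutable_attributes\n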

    And that concludes this recipe. Arguably, this recipe caters to advanced users, and in particular to users who are developing their own models. And yet, one could also argue that modifying the parameters of a model on the fly is a great tool to have at your disposal when you're doing online machine learning.

    "},{"location":"recipes/feature-extraction/","title":"Feature extraction","text":"

    To do.

    "},{"location":"recipes/hyperparameter-tuning/","title":"Hyperparameter tuning","text":"

    To do.

    "},{"location":"recipes/mini-batching/","title":"Mini-batching","text":"

    In its purest form, online machine learning encompasses models which learn with one sample at a time. This is the design which is used in River.

    The main downside of single-instance processing is that it doesn't scale to big data, at least not in the sense of traditional batch learning. Indeed, processing one sample at a time means that we are unable to fully take advantage of vectorisation and other computational tools that are taken for granted in batch learning. On top of this, processing a large dataset in River essentially involves a Python for loop, which might be too slow for some use cases. However, this doesn't mean that River is slow. In fact, for processing a single instance, River is a couple of orders of magnitude faster than libraries such as scikit-learn, PyTorch, and Tensorflow. That is because River is designed from the ground up to process a single instance, whereas the majority of other libraries choose to care about batches of data. Both approaches offer different compromises, and the best choice depends on your use case.

    In order to propose the best of both worlds, River offers some limited support for mini-batch learning. Some of River's estimators implement *_many methods on top of their *_one counterparts. For instance, preprocessing.StandardScaler has a learn_many method as well as a transform_many method, in addition to learn_one and transform_one. Each mini-batch method takes as input a pandas.DataFrame. Supervised estimators also take as input a pandas.Series of target values. We choose to use pandas.DataFrames over numpy.ndarrays because of the simple fact that the former allows us to name each feature. This in turn allows us to offer a uniform interface for both single instance and mini-batch learning.

    As an example, we will build a simple pipeline that scales the data and trains a logistic regression. Indeed, the compose.Pipeline class can be applied to mini-batches, as long as each step is able to do so.

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LogisticRegression()\n)\n

    For this example, we will use datasets.Higgs.

    from river import datasets\n\ndataset = datasets.Higgs()\nif not dataset.is_downloaded:\n    dataset.download()\ndataset\n
    Higgs dataset.\n\nThe data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22)\nare kinematic properties measured by the particle detectors in the accelerator. The last seven\nfeatures are functions of the first 21 features; these are high-level features derived by\nphysicists to help discriminate between the two classes.\n\n      Name  Higgs                                                                       \n      Task  Binary classification                                                       \n   Samples  11,000,000                                                                  \n  Features  28                                                                          \n    Sparse  False                                                                       \n      Path  /Users/max/river_data/Higgs/HIGGS.csv.gz                                    \n       URL  https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz\n      Size  2.62 GB                                                                     \nDownloaded  True\n

    The easiest way to read the data in a mini-batch fashion is to use the read_csv function from pandas.

    import pandas as pd\n\nnames = [\n    'target', 'lepton pT', 'lepton eta', 'lepton phi',\n    'missing energy magnitude', 'missing energy phi',\n    'jet 1 pt', 'jet 1 eta', 'jet 1 phi', 'jet 1 b-tag',\n    'jet 2 pt', 'jet 2 eta', 'jet 2 phi', 'jet 2 b-tag',\n    'jet 3 pt', 'jet 3 eta', 'jet 3 phi', 'jet 3 b-tag',\n    'jet 4 pt', 'jet 4 eta', 'jet 4 phi', 'jet 4 b-tag',\n    'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'\n]\n\nfor x in pd.read_csv(dataset.path, names=names, chunksize=8096, nrows=3e5):\n    y = x.pop('target')\n    y_pred = model.predict_proba_many(x)\n    model.learn_many(x, y)\n

    If you are familiar with scikit-learn, you might be aware that some of their estimators have a partial_fit method, which is similar to river's learn_many method. Here are some advantages that river has over scikit-learn:

    • We guarantee that River is just as fast as, if not faster than, scikit-learn. The differences are negligible, but slightly in favor of River.
    • We take as input dataframes, which allows us to name each feature. The benefit is that you can add/remove/permute features between batches and everything will keep working.
    • Estimators that support mini-batches also support single instance learning. This means that you can enjoy the best of both worlds. For instance, you can train with mini-batches and use predict_one to make predictions, as in the sketch after this list.
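    Here is a sketch of that last point, assuming the pipeline and the final chunk x from the loop above are still in scope:

    # Convert the last row of the final mini-batch to a plain dict of features\nx_single = x.iloc[-1].to_dict()\nmodel.predict_proba_one(x_single)\n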

    Note that you can check which estimators can process mini-batches programmatically:

    import importlib\nimport inspect\n\ndef can_mini_batch(obj):\n    return hasattr(obj, 'learn_many')\n\nfor module in importlib.import_module('river.api').__all__:\n    if module in ['datasets', 'synth']:\n        continue\n    for name, obj in inspect.getmembers(importlib.import_module(f'river.{module}'), can_mini_batch):\n        print(name)\n
    OneClassSVM\nMiniBatchClassifier\nMiniBatchRegressor\nMiniBatchSupervisedTransformer\nMiniBatchTransformer\nSKL2RiverClassifier\nSKL2RiverRegressor\nFuncTransformer\nPipeline\nSelect\nTransformerProduct\nTransformerUnion\nBagOfWords\nTFIDF\nLinearRegression\nLogisticRegression\nPerceptron\nOneVsRestClassifier\nBernoulliNB\nComplementNB\nMultinomialNB\nMLPRegressor\nOneHotEncoder\nOrdinalEncoder\nStandardScaler\n

    Because mini-batch learning isn't treated as a first-class citizen, some of River's functionalities require some work to play nicely with mini-batches. For instance, the objects from the metrics module have an update method that takes as input a single (y_true, y_pred) pair, so a mini-batch has to be unrolled (see the sketch below). This might change in the future, depending on demand.
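    Here is a sketch of such a workaround, where the mini-batch is simply unrolled; the two series are stand-ins for real labels and predictions:

    import pandas as pd\nfrom river import metrics\n\ny_true = pd.Series([True, False, True, True])\ny_hat = pd.Series([True, True, True, False])\n\n# Update the single-instance metric one pair at a time\nmetric = metrics.Accuracy()\nfor yt, yp in zip(y_true, y_hat):\n    metric.update(yt, yp)\n\nmetric  # Accuracy: 50.00%\n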

    We plan to promote more models to the mini-batch regime. However, we will only be doing so for the methods that benefit the most from it, as well as those that are most popular. Indeed, River's core philosophy will remain to cater to single instance learning.

    "},{"location":"recipes/model-evaluation/","title":"Model evaluation","text":"

    To do.

    "},{"location":"recipes/on-hoeffding-trees/","title":"Incremental decision trees in river: the Hoeffding Tree case","text":"

    Decision trees (DT) are popular learning models due to their inherent simplicity, flexibility and self-explainable structure. Moreover, they can achieve high predictive power when aggregated into ensembles. Bagging and gradient boosting-based tree ensembles are very popular solutions on competition platforms such as Kaggle, and also among researchers.

    Although fairly lightweight, traditional batch DTs cannot cope with data stream mining/online learning requirements, as they do multiple passes over the data and have to be retrained from scratch every time a new observation appears.

    The data stream literature has plenty of incremental DT (iDT) families that are better suited to online learning. Nonetheless, Hoeffding Trees (HT) are historically the most popular family of iDTs to date. In fact, HTs have some nice properties:

    • one-pass learning regime;
    • theoretical guarantees to converge to the batch DT model given enough observations and a stationary data distribution;
    • small memory and running time footprint (in most cases);
    • some of their variations can deal with non-stationary distributions.

    And the previous list goes on and on. Besides that, HTs also have the same advantages as batch DTs (C4.5/J48, CART, M5, etc.). We can inspect the structure of an HT to understand how decisions were made, which is a nice feature to have in online learning tasks.

    In River, HTs are first-class citizens, so we have multiple realizations of this framework that are suited to different learning tasks and scenarios.

    This brief introduction to HTs does not aim to be exhaustive, nor to delve into the algorithmic or implementation details of HTs. Instead, we intend to provide a high-level overview of HTs as they are envisioned in River, as well as their shared properties and important hyperparameters.

    In this guide, we are going to:

    1. summarize the differences across the multiple HT versions available;
    2. learn how to inspect tree models;
    3. learn how to manage the memory usage of HTs;
    4. compare numerical tree splitters and understand their impact on the iDT induction process.

    Well, without further ado, let's go!

    First things first, we are going to start with some imports.

    import matplotlib.pyplot as plt\nimport datetime as dt\n\nfrom river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing  # we are going to use that later\nfrom river.datasets import synth  # we are going to use some synthetic datasets too\nfrom river import tree\n
    "},{"location":"recipes/on-hoeffding-trees/#1-trees-trees-everywhere-gardening-101-with-river","title":"1. Trees, trees everywhere: gardening 101 with river","text":"

    At first glance, the number of iDT algorithms in River might seem like a lot to handle, but in reality the distinctions among them are easy to grasp. To make our lives easier, here's a neat table listing the available HT models and summarizing their differences:

    Name | Acronym | Task | Non-stationary? | Comments | Source
    Hoeffding Tree Classifier | HTC | Classification | No | Basic HT for classification tasks | [1]
    Hoeffding Adaptive Tree Classifier | HATC | Classification | Yes | Modifies HTC by adding an instance of ADWIN to each node to detect and react to drifts | [2]
    Extremely Fast Decision Tree Classifier | EFDT | Classification | No | Deploys split decisions as soon as possible, and periodically revisits decisions and redoes them if necessary. Not as fast in practice as the name implies, but it tends to converge faster than HTC to the model generated by a batch DT | [3]
    Hoeffding Tree Regressor | HTR | Regression | No | Basic HT for regression tasks. It is an adaptation of the FIRT/FIMT algorithm that bears some resemblance to HTC | [4]
    Hoeffding Adaptive Tree Regressor | HATR | Regression | Yes | Modifies HTR by adding an instance of ADWIN to each node to detect and react to drifts | -
    Incremental Structured-Output Prediction Tree Regressor | iSOUPT | Multi-target regression | No | Multi-target version of HTR | [5]
    Label Combination Hoeffding Tree Classifier | LCHTC | Multi-label classification | No | Creates a numerical code for each combination of the binary labels and uses HTC to learn from this encoded representation. At prediction time, decodes the modified representation to obtain the original label set | -

    As we can see, although their application fields sometimes overlap, the HT variations have specific situations in which they are better suited to work. Moreover, River provides standardized API access to all the HT variants, since they share many properties in common.

    "},{"location":"recipes/on-hoeffding-trees/#2-how-to-inspect-tree-models","title":"2. How to inspect tree models?","text":"

    River provides a handful of tools to inspect trained HTs. Here, we will show some examples of how to access their inner structures, get useful information, and plot the tree structure.

    Firstly, let's pick a toy dataset for our tree to learn from. Here we are going to focus on the classification case, but the same operations apply to other learning tasks. We will select the Phishing dataset from the datasets module to exemplify the HTs' capabilities.

    dataset = datasets.Phishing()\ndataset\n
    Phishing websites.\n\nThis dataset contains features from web pages that are classified as phishing or not.\n\n    Name  Phishing                                                          \n    Task  Binary classification                                             \n Samples  1,250                                                             \nFeatures  9                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/phishing.csv.gz\n

    We are going to train an instance of HoeffdingTreeClassifier using this dataset. As with everything else in River, training an iDT is a piece of cake!

    %%time\n\nmodel = tree.HoeffdingTreeClassifier(grace_period=50)\n\nfor x, y in dataset:\n    model.learn_one(x, y)\n\nmodel\n
    CPU times: user 56.8 ms, sys: 984 \u00b5s, total: 57.8 ms\nWall time: 58.7 ms\n
    HoeffdingTreeClassifier
    HoeffdingTreeClassifier ( grace_period=50 max_depth=inf split_criterion=\"info_gain\" delta=1e-07 tau=0.05 leaf_prediction=\"nba\" nb_threshold=0 nominal_attributes=None splitter=GaussianSplitter ( n_splits=10 ) binary_split=False min_branch_fraction=0.01 max_share_to_split=0.99 max_size=100. memory_estimate_period=1000000 stop_mem_management=False remove_poor_attrs=False merit_preprune=True )

    That's it! We are not going to go into details about the available parameters of HTC here. The user can refer to the documentation page for more information about that. Let's talk about model inspection :D

    At any time, we can easily get some statistics about our trained model by using the summary property:

    model.summary\n
    {'n_nodes': 5,\n 'n_branches': 2,\n 'n_leaves': 3,\n 'n_active_leaves': 3,\n 'n_inactive_leaves': 0,\n 'height': 3,\n 'total_observed_weight': 1250.0}\n

    This property shows us the internal structure of the tree, including data concerning the memory-management routines that we are going to check later in this guide. We can also get a representation of the tree model as a pandas.DataFrame object:

    model.to_dataframe().iloc[:5, :5]\n
      parent is_leaf  depth                                              stats                    feature\nnode\n0   <NA>   False      0                        {True: 260.0, False: 390.0}  empty_server_form_handler\n1      0    True      1  {True: 443.4163997711022, False: 59.8769131081...                        NaN\n2      0   False      1  {True: 71.58360022889781, False: 404.123086891...               popup_window\n3      2    True      2            {False: 31.426538522574834, True: 33.0}                        NaN\n4      2    True      2             {False: 250.57346147742516, True: 6.0}                        NaN\n

    Hmm, maybe not the clearest of the representations. What about drawing the tree structure instead?

    model.draw()\n

    Much better, huh?

    Lastly, we can check how the tree predicts one specific instance by using the debug_one method:

    x, y = next(iter(dataset))  # Let's select the first example in the stream\nx, y\n
    ({'empty_server_form_handler': 0.0,\n  'popup_window': 0.0,\n  'https': 0.0,\n  'request_from_other_domain': 0.0,\n  'anchor_from_other_domain': 0.0,\n  'is_popular': 0.5,\n  'long_url': 1.0,\n  'age_of_domain': 1,\n  'ip_in_url': 1},\n True)\n
    print(model.debug_one(x))\n
    empty_server_form_handler \u2264 0.5454545454545454\nClass True:\n    P(False) = 0.1\n    P(True) = 0.9\n

    Our tree got this one right! The debug_one method is especially useful when we are dealing with a big tree model, where drawing might not be the wisest choice (we would end up with a tree chart that has too much information to understand visually).

    Some additional hints:

    • the max_depth parameter is our friend when building HTs that need to be constantly inspected. This parameter, which is available for every HT variant, triggers a pre-pruning mechanism that stops tree growth when the given depth is reached.
    • we can also limit the depth when using the draw method, as shown in the sketch after this list.
    • in the case of tree ensembles, individual trees can be accessed using the [index] operator. Then, the same set of inspection tools are available to play with!
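    For example, reusing the tree we trained above, the drawing can be capped at a given depth (a sketch; the depth value is arbitrary):

    model.draw(max_depth=2)\n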
    "},{"location":"recipes/on-hoeffding-trees/#3-advanced-gardening-with-river-grab-your-pruning-shears-and-lets-limit-memory-usage","title":"3. Advanced gardening with river: grab your pruning shears and let's limit memory usage","text":"

    Online learning is well-suited to highly scalable processing centers with petabytes of data arriving intermittently, but it can also work on Internet of Things (IoT) devices operating at low power and with limited processing capability. Hence, making sure our trees do not use too much memory is a nice feature that impacts both energy usage and running time. HTs have memory-management routines that put the user in control of the computational resources that are available.

    In this brief guide, we are going to use a regression tree, since this kind of iDT typically uses more memory than its classification counterparts. However, the user can control memory usage in exactly the same way in River, regardless of the HT variant!

    We will rely on the Friedman synthetic dataset (data generator) from the synth module in our evaluation. Since data generators can produce instances indefinitely, we will select a sample of size 10K for our tests.

    We are almost ready to go. Let's first define a simple function that plots the results obtained from a given dataset, metric, and set of models.

    def plot_performance(dataset, metric, models):\n    metric_name = metric.__class__.__name__\n\n    # To make the generated data reusable\n    dataset = list(dataset)\n    fig, ax = plt.subplots(figsize=(10, 5), nrows=3, dpi=300)\n    for model_name, model in models.items():\n        step = []\n        error = []\n        r_time = []\n        memory = []\n\n        for checkpoint in evaluate.iter_progressive_val_score(\n            dataset, model, metric, measure_time=True, measure_memory=True, step=100\n        ):\n            step.append(checkpoint[\"Step\"])\n            error.append(checkpoint[metric_name].get())\n\n            # Convert timedelta object into seconds\n            r_time.append(checkpoint[\"Time\"].total_seconds())\n            # Make sure the memory measurements are in MB\n            raw_memory = checkpoint[\"Memory\"]\n            memory.append(raw_memory * 2**-20)\n\n        ax[0].plot(step, error, label=model_name)\n        ax[1].plot(step, r_time, label=model_name)\n        ax[2].plot(step, memory, label=model_name)\n\n    ax[0].set_ylabel(metric_name)\n    ax[1].set_ylabel('Time (seconds)')\n    ax[2].set_ylabel('Memory (MB)')\n    ax[2].set_xlabel('Instances')\n\n    ax[0].grid(True)\n    ax[1].grid(True)\n    ax[2].grid(True)\n\n    ax[0].legend(\n        loc='upper center', bbox_to_anchor=(0.5, 1.25),\n        ncol=3, fancybox=True, shadow=True\n    )\n    plt.tight_layout()\n    plt.close()\n\n    return fig\n
    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"Unbounded HTR\": (\n            preprocessing.StandardScaler() |\n            tree.HoeffdingTreeRegressor(splitter=tree.splitter.EBSTSplitter())\n        )\n    }\n)\n

    In our example we use the EBSTSplitter, which is going to be discussed later. For now, it is enough to know that it is a mechanism to evaluate split candidates in the trees.

    As we can see, our tree uses almost 10 MB to keep its structure. Let's say we wanted to limit our memory usage to 5 MB. How could we do that?

    Note that we are using an illustrative case here. In real applications, data may be unbounded, so the trees might grow indefinitely.

    HTs expose some parameters related to memory management. The user can refer to the documentation for more details on that matter. Here, we are going to focus on two parameters:

    • max_size: determines the maximum amount of memory (in MB) that the HT can use.
    • memory_estimate_period: the interval, in number of instances, at which the memory-management routine is triggered.

    We are going to limit our HTR to 5 MB and perform memory checks at intervals of 500 instances.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"Restricted HTR\": (\n            preprocessing.StandardScaler()\n            | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter(),\n                max_size=5,\n                memory_estimate_period=500\n            )\n        )\n    }\n)\n

    Note that as soon as the memory usage reaches the limit we set (at the memory check intervals), the HTR starts managing its resource usage to reduce its size. As a consequence, the running time also decreases. For more accurate management, the intervals between memory checks should be decreased. This action, however, has a cost, since the tree stops the learning process to estimate its size and alter its own structure. Memory checks that are too frequent might result in a slow learning process. Besides, by using fewer resources, the predictive performance can be negatively impacted. So, use this tool with caution!

    But how does that work at all?

    HTs monitor the incoming feature values to perform split attempts. To do so, they rely on a class of algorithms called Attribute Observers (AO) or Splitters (spoiler alert!). Each leaf node in an HT keeps one AO per incoming feature. At pre-determined intervals (the grace_period parameter), leaves query their AOs for split candidates. Monitoring input features has its costs (mainly for the numerical ones). In fact, AOs correspond to one of the most time- and memory-consuming portions of HTs. To manage memory usage, an HT first determines its least promising leaves, w.r.t. how likely they are to be split. Then, these leaves' AOs are removed, and the nodes are said to be "deactivated." That's it! The deactivated leaves do not perform split attempts anymore, but they continue to be updated so they can provide responses. They will be kept as leaves as long as there are no resources available to enable tree growth. These leaves can be activated again (meaning that new AOs will be created for them) if memory becomes available, so don't worry!
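    A quick way to see this mechanism in action, assuming a trained, memory-bounded HT named model, is to check the leaf counters exposed by the summary property:

    model.summary['n_active_leaves'], model.summary['n_inactive_leaves']\n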

    Hint: another indirect way to bound memory usage is to limit the tree depth. By default, the trees can grow indefinitely, but the max_depth parameter can control this behavior.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"HTR with at most 5 levels\": (\n            preprocessing.StandardScaler()\n            | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter(),\n                max_depth=5\n            )\n        )\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#4-branching-and-growth-splitters-the-heart-of-the-trees","title":"4. Branching and growth: splitters, the heart of the trees","text":"

As previously stated, one of the core operations of iDTs is, well, to grow. Plants and gardening-related jokes apart, growth in HTs is guided by their AOs or splitters, as mentioned at the end of Section 3.

    Nominal features can be easily monitored, since the feature partitions are well-defined beforehand. Numerical features, on the other hand, do not have an explicit best cut point. Still, numerical features are typically split by using a binary test: \\(\\le\\) or \\(>\\). Therefore, numerical splitters must somehow summarize the incoming feature values and be able to evaluate the merit of split point candidates.

There are diverse strategies to monitor numerical features and choices related to them, including which data structure will be used to summarize the incoming feature and how many split points are going to be evaluated during split attempts. Again, this guide does not intend to be an exhaustive deep dive into iDTs. In fact, each of the following aspects of iDTs could be considered a separate research area: AOs, intervals between split attempts, split heuristics (e.g., info gain, variance reduction, and so on), tree depth and max size, and much more!

Let's focus a bit on the AO matter. River provides a handful of splitters for classification and regression trees, which can be chosen via the splitter parameter. We will list the available tree splitters in the following sections and compare some of their characteristics.

    Some notation:

    • \\(n\\): Number of observations seen so far.
    • \\(c\\): the number of classes.
    • \\(s\\): the number of split points to evaluate (which means that this is a user-given parameter).
    • \\(h\\): the number of histogram bins or hash slots. Tipically, \\(h \\ll n\\).
    "},{"location":"recipes/on-hoeffding-trees/#41-classification-tree-splitters","title":"4.1. Classification tree splitters","text":"

    The following table summarizes the available classification splitters. The user might refer to the documentation of each splitter for more details about their functioning.

| Splitter | Description | Insertion | Memory | Split candidate query | Works with Naive Bayes leaves? |
| --- | --- | --- | --- | --- | --- |
| Exhaustive | Keeps all the observed input values and class counts in a Binary Search Tree (BST) | \(O(\log n)\) (average) or \(O(n)\) (worst case) | \(O(n)\) | \(O(n)\) | No |
| Histogram | Builds a histogram for each class in order to discretize the input feature | \(O(\log h)\) | \(O(c h)\) | \(O(c h)\) | Yes |
| Gaussian | Approximates the class distributions using Gaussian distributions | \(O(1)\) | \(O(c)\) | \(O(cs)\) | Yes |

Note that some of the splitters have configurable parameters that directly impact not only their time and memory costs, but also the final predictive performance. Examples:

• The number of split points can be configured in the Gaussian splitter. Increasing this number makes this splitter slower, but it also potentially increases the quality of the obtained query points, implying enhanced tree accuracy.
• The number of stored bins can be selected in the Histogram splitter. Increasing this number increases the memory footprint and running time of this splitter, but it also potentially makes its split candidates more accurate and positively impacts the tree's final predictive performance (a configuration sketch follows this list).
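As an illustration, here is a minimal configuration sketch; the n_splits and n_bins parameter names are assumptions taken from the splitters' signatures, so double-check them against the documentation:

    from river import tree\n\n# More split candidates for the Gaussian splitter: slower split attempts,\n# but potentially finer cut points.\ngaussian = tree.splitter.GaussianSplitter(n_splits=32)\n\n# More bins for the Histogram splitter: a larger memory footprint,\n# but a more faithful summary of each class distribution.\nhistogram = tree.splitter.HistogramSplitter(n_bins=64)\n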

Next, we provide a brief comparison of the classification splitters using 10K instances of the Random RBF synthetic dataset. Note that the tree equipped with the Exhaustive splitter does not use Naive Bayes leaves.

    plot_performance(\n    synth.RandomRBF(seed_model=7, seed_sample=42).take(10_000),\n    metrics.Accuracy(),\n    {\n        \"HTC + Exhaustive splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.ExhaustiveSplitter(),\n            leaf_prediction=\"mc\"\n        ),\n        \"HTC + Histogram splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.HistogramSplitter()\n        ),\n        \"HTC + Gaussian splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.GaussianSplitter()\n        )\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#42-regression-tree-splitters","title":"4.2 Regression tree splitters","text":"

    The available regression tree splitters are summarized in the next table. The TE-BST costs are expressed in terms of \\(n^*\\) because the number of stored elements can be smaller than or equal to \\(n\\).

| Splitter | Description | Insertion | Memory | Split candidate query |
| --- | --- | --- | --- | --- |
| Extended Binary Search Tree (E-BST) | Stores all the observations and target statistics in a BST | \(O(\log n)\) (average) or \(O(n)\) (worst case) | \(O(n)\) | \(O(n)\) |
| Truncated E-BST (TE-BST) | Rounds the incoming data before passing it to the BST | \(O(\log n^*)\) (average) or \(O(n^*)\) (worst case) | \(O(n^*)\) | \(O(n^*)\) |
| Quantization Observer (QO) | Uses a hash-like structure to quantize the incoming data | \(O(1)\) | \(O(h)\) | \(O(h \log h)\) |

    E-BST is an exhaustive algorithm, i.e., it works as batch solutions usually do, which might be prohibitive in real-world online scenarios. TE-BST and QO apply approximations to alleviate the costs involved in monitoring numerical data and performing split attempts. The number of desired decimal places to round the data (TE-BST) and the quantization radius (QO) are directly related to the running time, memory footprint, and error of the resulting tree model.
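For instance, a minimal sketch of tuning these two knobs; the digits and radius parameter names are assumptions based on the splitters' documentation:

    from river import tree\n\n# Coarser rounding for TE-BST: fewer stored elements, rougher split points.\nte_bst = tree.splitter.TEBSTSplitter(digits=1)\n\n# A larger quantization radius for QO: a smaller hash structure,\n# at the cost of coarser split candidates.\nqo = tree.splitter.QOSplitter(radius=0.5)\n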

We present a brief comparison of the available regression tree splitters using 10K instances of the Friedman synthetic dataset.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"HTR + E-BST\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter()\n            )\n        ),\n        \"HTR + TE-BST\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.TEBSTSplitter()\n            )\n        ),\n        \"HTR + QO\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.QOSplitter()\n            )\n        ),\n\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#wrapping-up","title":"Wrapping up","text":"

This guide provides a walkthrough of the HTs available in River. We discussed model inspection, memory management, and feature splits. Keep in mind that each HT variant has specific details and capabilities that are out of the scope of this introductory material. The user is advised to check the documentation page of the tree models for detailed information.

    "},{"location":"recipes/pipelines/","title":"Pipelines","text":"

Pipelines are an integral part of River. We encourage their usage and apply them in many of our examples.

    The compose.Pipeline contains all the logic for building and applying pipelines. A pipeline is essentially a list of estimators that are applied in sequence. The only requirement is that the first n - 1 steps be transformers. The last step can be a regressor, a classifier, a clusterer, a transformer, etc. Here is an example:

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\nfrom river import feature_extraction\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    feature_extraction.PolynomialExtender(),\n    linear_model.LinearRegression()\n)\n

You can also use the | operator, like so:

    model = (\n    preprocessing.StandardScaler() |\n    feature_extraction.PolynomialExtender() |\n    linear_model.LinearRegression()\n)\n

Or, equivalently:

    model = preprocessing.StandardScaler() \nmodel |= feature_extraction.PolynomialExtender()\nmodel |= linear_model.LinearRegression()\n

A pipeline can be visualized, simply by displaying it:

    model\n
    StandardScaler
    StandardScaler ( with_std=True )
    PolynomialExtender
    PolynomialExtender ( degree=2 interaction_only=False include_bias=False bias_name=\"bias\" )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

compose.Pipeline inherits from base.Estimator, which means that it has a learn_one method. You would expect learn_one to update each estimator, but that's not actually what happens. Instead, the transformers are updated when predict_one (or predict_proba_one for that matter) is called. Indeed, in online machine learning, we can update the unsupervised parts of our model when a sample arrives. We don't have to wait for the ground truth to arrive in order to update unsupervised estimators that don't depend on it. In other words, in a pipeline, learn_one updates the supervised parts, whilst predict_one updates the unsupervised parts. It's important to be aware of this behavior, as it is quite different from what is done in other libraries that rely on batch machine learning.

    Here is a small example to illustrate the previous point:

    from river import datasets\n\ndataset = datasets.TrumpApproval()\nx, y = next(iter(dataset))\nx, y\n
    ({'ordinal_date': 736389,\n  'gallup': 43.843213,\n  'ipsos': 46.19925042857143,\n  'morning_consult': 48.318749,\n  'rasmussen': 44.104692,\n  'you_gov': 43.636914000000004},\n 43.75505)\n

    Let us call predict_one, which will update each transformer, but won't update the linear regression.

    model.predict_one(x)\n
    0.0\n

    The prediction is nil because each weight of the linear regression is equal to 0.

    model['StandardScaler'].means\n
    defaultdict(float,\n            {'ordinal_date': 0.0,\n             'gallup': 0.0,\n             'ipsos': 0.0,\n             'morning_consult': 0.0,\n             'rasmussen': 0.0,\n             'you_gov': 0.0})\n

    As we can see, the means of each feature have been updated, even though we called predict_one and not learn_one.

Note that if you call transform_one with a pipeline whose last step is not a transformer, then the output from the last transformer (which is thus the penultimate step) will be returned:

    model.transform_one(x)\n
    {'ordinal_date': 0.0,\n 'gallup': 0.0,\n 'ipsos': 0.0,\n 'morning_consult': 0.0,\n 'rasmussen': 0.0,\n 'you_gov': 0.0,\n 'ordinal_date*ordinal_date': 0.0,\n 'gallup*ordinal_date': 0.0,\n 'ipsos*ordinal_date': 0.0,\n 'morning_consult*ordinal_date': 0.0,\n 'ordinal_date*rasmussen': 0.0,\n 'ordinal_date*you_gov': 0.0,\n 'gallup*gallup': 0.0,\n 'gallup*ipsos': 0.0,\n 'gallup*morning_consult': 0.0,\n 'gallup*rasmussen': 0.0,\n 'gallup*you_gov': 0.0,\n 'ipsos*ipsos': 0.0,\n 'ipsos*morning_consult': 0.0,\n 'ipsos*rasmussen': 0.0,\n 'ipsos*you_gov': 0.0,\n 'morning_consult*morning_consult': 0.0,\n 'morning_consult*rasmussen': 0.0,\n 'morning_consult*you_gov': 0.0,\n 'rasmussen*rasmussen': 0.0,\n 'rasmussen*you_gov': 0.0,\n 'you_gov*you_gov': 0.0}\n

In many cases, you might want to connect a step to multiple steps. For instance, you might want to extract different kinds of features from a single input. An elegant way to do this is to use a compose.TransformerUnion. Essentially, the latter is a list of transformers whose results will be merged into a single dict when transform_one is called. As an example, let's say that we want to apply a feature_extraction.RBFSampler as well as a feature_extraction.PolynomialExtender. This may be done as follows:

    model = (\n    preprocessing.StandardScaler() |\n    (feature_extraction.PolynomialExtender() + feature_extraction.RBFSampler()) |\n    linear_model.LinearRegression()\n)\n\nmodel\n
    StandardScaler
    StandardScaler ( with_std=True )
    PolynomialExtender
    PolynomialExtender ( degree=2 interaction_only=False include_bias=False bias_name=\"bias\" )
    RBFSampler
    RBFSampler ( gamma=1. n_components=100 seed=None )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

Note that the + symbol acts as a shorthand notation for creating a compose.TransformerUnion, which means that we could have declared the above pipeline like so:

    model = (\n    preprocessing.StandardScaler() |\n    compose.TransformerUnion(\n        feature_extraction.PolynomialExtender(),\n        feature_extraction.RBFSampler()\n    ) |\n    linear_model.LinearRegression()\n)\n

Pipelines provide the benefit of removing a lot of cruft by taking care of tedious details for you. They also enable you to clearly define what steps your model is made of. Finally, having your model in a single object means that you can move it around more easily. Note that you can include user-defined functions in a pipeline by using a compose.FuncTransformer.
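For instance, here is a minimal sketch; the hour_of_day function is hypothetical and assumes the input dict holds a datetime under the 'moment' key, as in the Bikes dataset:

    from river import compose\nfrom river import preprocessing\n\ndef hour_of_day(x):\n    # Extract a single numeric feature from the raw input.\n    return {'hour': x['moment'].hour}\n\nmodel = compose.FuncTransformer(hour_of_day) | preprocessing.StandardScaler()\n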

    "},{"location":"recipes/reading-data/","title":"Reading data","text":"

    In River, the features of a sample are stored inside a dictionary, which in Python is called a dict and is a native data structure. In other words, we don't use any sophisticated data structure, such as a numpy.ndarray or a pandas.DataFrame.

    The main advantage of using plain dicts is that it removes the overhead that comes with using the aforementioned data structures. This is important in a streaming context because we want to be able to process many individual samples in rapid succession. Another advantage is that dicts allow us to give names to our features. Finally, dicts are not typed, and can therefore store heterogeneous data.

    Another advantage which we haven't mentioned is that dicts play nicely with Python's standard library. Indeed, Python contains many tools that allow manipulating dicts. For instance, the csv.DictReader can be used to read a CSV file and convert each row to a dict. In fact, the stream.iter_csv method from River is just a wrapper on top of csv.DictReader that adds a few bells and whistles.
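As a quick illustration of how little machinery is needed, here is csv.DictReader on its own, fed with an in-memory list of lines for the sake of the example:

    import csv\n\nlines = ['name,age', 'Alice,32', 'Bob,27']\nfor row in csv.DictReader(lines):\n    print(row)  # e.g. {'name': 'Alice', 'age': '32'} -- values are plain strings\n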

    River provides some out-of-the-box datasets to get you started.

    from river import datasets\n\ndataset = datasets.Bikes()\ndataset\n
    Bike sharing station information from the city of Toulouse.\n\nThe goal is to predict the number of bikes in 5 different bike stations from the city of\nToulouse.\n\n      Name  Bikes                                                         \n      Task  Regression                                                    \n   Samples  182,470                                                       \n  Features  8                                                             \n    Sparse  False                                                         \n      Path  /Users/max/river_data/Bikes/toulouse_bikes.csv                \n       URL  https://maxhalford.github.io/files/datasets/toulouse_bikes.zip\n      Size  12.52 MB                                                      \nDownloaded  True\n

Note that when we say \"loaded\", we don't mean that the actual data is read from the disk. On the contrary, the dataset is streamed: it can be iterated over one sample at a time. In Python lingo, it's a generator.

    Let's take a look at the first sample:

    x, y = next(iter(dataset))\nx\n
    {'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'station': 'metro-canal-du-midi',\n 'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'pressure': 1017.0,\n 'temperature': 6.54,\n 'wind': 9.3}\n

    Each dataset is iterable, which means we can also do:

    for x, y in dataset:\n    break\nx\n
    {'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'station': 'metro-canal-du-midi',\n 'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'pressure': 1017.0,\n 'temperature': 6.54,\n 'wind': 9.3}\n

    As we can see, the values have different types.

    Under the hood, calling for x, y in dataset simply iterates over a file and parses each value appropriately. We can do this ourselves by using stream.iter_csv:

    from river import stream\n\nX_y = stream.iter_csv(dataset.path)\nx, y = next(X_y)\nx, y\n
    ({'moment': '2016-04-01 00:00:07',\n  'bikes': '1',\n  'station': 'metro-canal-du-midi',\n  'clouds': '75',\n  'description': 'light rain',\n  'humidity': '81',\n  'pressure': '1017.0',\n  'temperature': '6.54',\n  'wind': '9.3'},\n None)\n

There are a couple of things that are wrong. First of all, the numeric features have not been cast into numbers. Indeed, by default, stream.iter_csv assumes that everything is a string. A related issue is that the moment field hasn't been parsed into a datetime. Finally, the target field, which is bikes, hasn't been separated from the rest of the features. We can remedy these issues by setting a few parameters:

    X_y = stream.iter_csv(\n    dataset.path,\n    converters={\n        'bikes': int,\n        'clouds': int,\n        'humidity': int,\n        'pressure': float,\n        'temperature': float,\n        'wind': float\n    },\n    parse_dates={'moment': '%Y-%m-%d %H:%M:%S'},\n    target='bikes'\n)\nx, y = next(X_y)\nx, y\n
    ({'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n  'station': 'metro-canal-du-midi',\n  'clouds': 75,\n  'description': 'light rain',\n  'humidity': 81,\n  'pressure': 1017.0,\n  'temperature': 6.54,\n  'wind': 9.3},\n 1)\n

That's much better. We invite you to take a look at the stream module to see for yourself what other methods are available. Note that River is first and foremost a machine learning library, and therefore isn't as concerned with reading data as it is with statistical algorithms. We do however believe that the fact that we use dictionaries gives you, the user, a lot of freedom and flexibility.

    The stream module provides helper functions to read data from different formats. For instance, you can use the stream.iter_sklearn_dataset function to turn any scikit-learn dataset into a stream.

    from sklearn import datasets\n\ndataset = datasets.load_diabetes()\n\nfor x, y in stream.iter_sklearn_dataset(dataset):\n    break\n\nx, y\n
    ({'age': 0.0380759064334241,\n  'sex': 0.0506801187398187,\n  'bmi': 0.0616962065186885,\n  'bp': 0.0218723549949558,\n  's1': -0.0442234984244464,\n  's2': -0.0348207628376986,\n  's3': -0.0434008456520269,\n  's4': -0.00259226199818282,\n  's5': 0.0199084208763183,\n  's6': -0.0176461251598052},\n 151.0)\n

To conclude, let us shortly mention the difference between proactive learning and reactive learning in the specific context of online machine learning. When we loop over data with a for loop, we have control over the data and the order in which it arrives. We are proactive in the sense that we, the user, are asking for the data to arrive.

In contrast, in a reactive situation, we don't have control over the data arrival. A typical example of such a situation is a web server, where web requests arrive in an arbitrary order. This is a situation where River shines. For instance, in a Flask application, you could define a route to make predictions with a River model like so:

    import flask\n\napp = flask.Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef predict():\n    payload = flask.request.json\n    river_model = load_model()\n    return river_model.predict_proba_one(payload)\n

Likewise, a model can be updated whenever a request arrives, like so:

    @app.route('/', methods=['POST'])\ndef learn():\n    payload = flask.request.json\n    river_model = load_model()\n    river_model.learn_one(payload['features'], payload['target'])\n    return {}, 201\n

    To summarize, River can be used in many different ways. The fact that it uses dictionaries to represent features provides a lot of flexibility and space for creativity.

    "},{"location":"recipes/rolling-computations/","title":"Rolling computations","text":"

    You might wonder which classes in River can be wrapped with a utils.Rolling. This can be answered with a bit of metaprogramming.

    import importlib\nimport inspect\nfrom river.utils.rolling import Rollable\n\nfor submodule in importlib.import_module(\"river.api\").__all__:\n    for _, obj in inspect.getmembers(\n        importlib.import_module(f\"river.{submodule}\"), lambda x: isinstance(x, Rollable)\n    ):\n        print(f'{submodule}.{obj.__name__}')\n
covariance.EmpiricalCovariance\nmetrics.Accuracy\nmetrics.AdjustedMutualInfo\nmetrics.AdjustedRand\nmetrics.BalancedAccuracy\nmetrics.ClassificationReport\nmetrics.CohenKappa\nmetrics.Completeness\nmetrics.ConfusionMatrix\nmetrics.CrossEntropy\nmetrics.F1\nmetrics.FBeta\nmetrics.FowlkesMallows\nmetrics.GeometricMean\nmetrics.Homogeneity\nmetrics.Jaccard\nmetrics.LogLoss\nmetrics.MAE\nmetrics.MAPE\nmetrics.MCC\nmetrics.MSE\nmetrics.MacroF1\nmetrics.MacroFBeta\nmetrics.MacroJaccard\nmetrics.MacroPrecision\nmetrics.MacroRecall\nmetrics.MicroF1\nmetrics.MicroFBeta\nmetrics.MicroJaccard\nmetrics.MicroPrecision\nmetrics.MicroRecall\nmetrics.MultiFBeta\nmetrics.MutualInfo\nmetrics.NormalizedMutualInfo\nmetrics.Precision\nmetrics.R2\nmetrics.RMSE\nmetrics.RMSLE\nmetrics.ROCAUC\nmetrics.Rand\nmetrics.Recall\nmetrics.RollingROCAUC\nmetrics.SMAPE\nmetrics.Silhouette\nmetrics.VBeta\nmetrics.WeightedF1\nmetrics.WeightedFBeta\nmetrics.WeightedJaccard\nmetrics.WeightedPrecision\nmetrics.WeightedRecall\nproba.Beta\nproba.Gaussian\nproba.Multinomial\nproba.MultivariateGaussian\nstats.BayesianMean\nstats.Cov\nstats.Mean\nstats.PearsonCorr\nstats.SEM\nstats.Sum\nstats.Var\n
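As a usage example, here is a minimal sketch of wrapping one of the listed objects, stats.Mean, with utils.Rolling:

    from river import stats\nfrom river import utils\n\nrolling_mean = utils.Rolling(stats.Mean(), window_size=3)\nfor x in [1, 2, 3, 4, 5]:\n    rolling_mean.update(x)\n\nrolling_mean.get()  # mean over the last 3 values, i.e. 4.0\n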
    "},{"location":"releases/0.0.2/","title":"0.0.2 - 2019-02-13","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.0.2/#compat","title":"compat","text":"
    • Added sklearn wrappers.
    "},{"location":"releases/0.0.2/#ensemble","title":"ensemble","text":"
    • Added ensemble.HedgeClassifier.
    "},{"location":"releases/0.0.2/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.RandomDiscarder.
    "},{"location":"releases/0.0.2/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.TargetEncoder.
    "},{"location":"releases/0.0.2/#impute","title":"impute","text":"
    • Added impute.NumericImputer.
    "},{"location":"releases/0.0.2/#optim","title":"optim","text":"
    • Added optim.AbsoluteLoss.
    • Added optim.HingeLoss.
    • Added optim.EpsilonInsensitiveHingeLoss.
    "},{"location":"releases/0.0.2/#stats","title":"stats","text":"
    • Added stats.NUnique.
    • Added stats.Min.
    • Added stats.Max.
    • Added stats.PeakToPeak.
    • Added stats.Kurtosis.
    • Added stats.Skew.
    • Added stats.Sum.
    • Added stats.EWMean.
    • Made sure the running statistics produce the same results as pandas.DataFrame.rolling method.
    "},{"location":"releases/0.0.3/","title":"0.0.3 - 2019-03-21","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.0.3/#base","title":"base","text":"
    • Calling fit_one now returns the calling instance, not the out-of-fold prediction/transform; fit_predict_one, fit_predict_proba_one, and fit_transform_one are available to reproduce the previous behavior.
    • Binary classifiers now output a dict with probabilities for False and True when calling predict_proba_one, which solves the interface issues of having multi-class classifiers do binary classification.
    "},{"location":"releases/0.0.3/#compat","title":"compat","text":"
    • Added compat.convert_river_to_sklearn.
    "},{"location":"releases/0.0.3/#compose","title":"compose","text":"
    • Added compose.BoxCoxTransformRegressor.
    • Added compose.TargetModifierRegressor.
    "},{"location":"releases/0.0.3/#datasets","title":"datasets","text":"
    • Added datasets.fetch_restaurants.
    • Added datasets.load_airline.
    "},{"location":"releases/0.0.3/#dist","title":"dist","text":"
    • Added dist.Multinomial.
    • Added dist.Normal.
    "},{"location":"releases/0.0.3/#ensemble","title":"ensemble","text":"
    • Added ensemble.BaggingRegressor.
    "},{"location":"releases/0.0.3/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.TargetGroupBy.
    "},{"location":"releases/0.0.3/#impute","title":"impute","text":"
    • Added impute.CategoricalImputer.
    "},{"location":"releases/0.0.3/#linear_model","title":"linear_model","text":"
    • Added linear_model.FMRegressor.
    • Removed all the passive-aggressive estimators.
    "},{"location":"releases/0.0.3/#metrics","title":"metrics","text":"
    • Added metrics.Accuracy.
    • Added metrics.MAE.
    • Added metrics.MSE.
    • Added metrics.RMSE.
    • Added metrics.RMSLE.
    • Added metrics.SMAPE.
    • Added metrics.Precision.
    • Added metrics.Recall.
    • Added metrics.F1.
    "},{"location":"releases/0.0.3/#model_selection","title":"model_selection","text":"
    • model_selection.online_score can now be passed a metrics.Metric instead of an sklearn metric; it also checks that the provided metric can be used with the accompanying model.
    "},{"location":"releases/0.0.3/#naive_bayes","title":"naive_bayes","text":"
    • Added naive_bayes.GaussianNB.
    "},{"location":"releases/0.0.3/#optim","title":"optim","text":"
    • Added optim.PassiveAggressiveI.
    • Added optim.PassiveAggressiveII.
    "},{"location":"releases/0.0.3/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.Discarder.
    • Added preprocessing.PolynomialExtender.
    • Added preprocessing.FuncTransformer.
    "},{"location":"releases/0.0.3/#reco","title":"reco","text":"
    • Added reco.SVD.
    "},{"location":"releases/0.0.3/#stats","title":"stats","text":"
    • Added stats.Mode.
    • Added stats.Quantile.
    • Added stats.RollingQuantile.
    • Added stats.Entropy.
    • Added stats.RollingMin.
    • Added stats.RollingMax.
    • Added stats.RollingMode.
    • Added stats.RollingSum.
    • Added stats.RollingPeakToPeak.
    "},{"location":"releases/0.0.3/#stream","title":"stream","text":"
    • Added stream.iter_csv.
    "},{"location":"releases/0.0.3/#tree","title":"tree","text":"
    • Added tree.MondrianTreeClassifier.
    • Added tree.MondrianTreeRegressor.
    "},{"location":"releases/0.1.0/","title":"0.1.0 - 2019-05-08","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.1.0/#base","title":"base","text":"
    • Removed the fit_predict_one estimator method.
    • Removed the fit_predict_proba_one estimator method.
    • Removed the fit_transform_one estimator method.
    "},{"location":"releases/0.1.0/#compat","title":"compat","text":"
    • Added compat.convert_sklearn_to_river.
    • compat.convert_river_to_sklearn now returns an sklearn.pipeline.Pipeline when provided with a compose.Pipeline.
    "},{"location":"releases/0.1.0/#compose","title":"compose","text":"
    • Added compose.Discard.
    • Added compose.Select.
    • Added compose.SplitRegressor.
    • The draw method of compose.Pipeline now works properly for arbitrary amounts of nesting, including multiple nested compose.FeatureUnion.
    "},{"location":"releases/0.1.0/#datasets","title":"datasets","text":"
    • Added datasets.fetch_electricity.
    "},{"location":"releases/0.1.0/#dummy","title":"dummy","text":"
    • Added dummy.NoChangeClassifier.
    • Added dummy.PriorClassifier.
    • Added dummy.StatisticRegressor.
    "},{"location":"releases/0.1.0/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.Differ.
    • Renamed feature_extraction.GroupBy to feature_extraction.Agg.
    • Renamed feature_extraction.TargetGroupBy to feature_extraction.TargetAgg.
    "},{"location":"releases/0.1.0/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.SelectKBest.
    • Added feature_selection.VarianceThreshold.
    "},{"location":"releases/0.1.0/#impute","title":"impute","text":"
    • Added impute.StatImputer.
    • Removed impute.CategoricalImputer.
    • Removed impute.NumericImputer.
    "},{"location":"releases/0.1.0/#linear_model","title":"linear_model","text":"
    • Added linear_model.PAClassifier.
    • Added linear_model.PARegressor.
    • Added linear_model.SoftmaxRegression.
    "},{"location":"releases/0.1.0/#metrics","title":"metrics","text":"
    • Added metrics.ConfusionMatrix.
    • Added metrics.CrossEntropy.
    • Added metrics.MacroF1.
    • Added metrics.MacroPrecision.
    • Added metrics.MacroRecall.
    • Added metrics.MicroF1.
    • Added metrics.MicroPrecision.
    • Added metrics.MicroRecall.
    • Each metric now has a bigger_is_better property to indicate if a high value is better than a low one or not.
    "},{"location":"releases/0.1.0/#optim","title":"optim","text":"
    • Added optim.OptimalLR.
    • Added optim.CrossEntropy.
    • Removed optim.PassiveAggressiveI.
    • Removed optim.PassiveAggressiveII.
    "},{"location":"releases/0.1.0/#preprocessing","title":"preprocessing","text":"
    • Removed preprocessing.Discarder.
    • Added on and sparse parameters to preprocessing.OneHotEncoder.
    "},{"location":"releases/0.1.0/#stats","title":"stats","text":"
    • Added stats.Covariance.
    • Added stats.PearsonCorrelation.
    • Added stats.SmoothMean.
    "},{"location":"releases/0.1.0/#utils","title":"utils","text":"
    • Added utils.check_estimator.
    • Added utils.Histogram.
    • Added utils.SortedWindow.
    • Added utils.Window.
    "},{"location":"releases/0.10.0/","title":"0.10.0 - 2022-02-04","text":""},{"location":"releases/0.10.0/#base","title":"base","text":"
    • Introduce base.MiniBatchTransformer. Add support for mini-batches to compose.TransformerUnion, compose.Select, and preprocessing.OneHotEncoder.
    "},{"location":"releases/0.10.0/#checks","title":"checks","text":"
    • Created this module to store estimator unit testing, rather than having it in the utils module.
    "},{"location":"releases/0.10.0/#compose","title":"compose","text":"
    • Split compose.Renamer into compose.Prefixer and compose.Suffixer that respectively prepend and append a string to the features' name.
    • Changed compose.Renamer to allow feature renaming following a mapping.
    "},{"location":"releases/0.10.0/#evaluate","title":"evaluate","text":"
• Refactored evaluate.progressive_validation to work with anomaly.base.AnomalyDetectors.
    "},{"location":"releases/0.10.0/#facto","title":"facto","text":"
    • Added debug_one method to BaseFM.
    "},{"location":"releases/0.10.0/#feature_extraction","title":"feature_extraction","text":"
• Made the by parameter in feature_extraction.Agg and feature_extraction.TargetAgg optional, allowing aggregates to be calculated over the whole data.
• Removed feature_extraction.Lagger and feature_extraction.TargetLagger. Their functionality can be reproduced by combining feature_extraction.Agg and stats.Shift.
• feature_extraction.Agg and feature_extraction.TargetAgg now have a state property. It returns a pandas.Series representing the current aggregate values within each group.
    "},{"location":"releases/0.10.0/#metrics","title":"metrics","text":"
• metrics.ROCAUC works with base.AnomalyDetectors.
    "},{"location":"releases/0.10.0/#misc","title":"misc","text":"
    • Created this module to store some stuff that was in the utils module but wasn't necessarily shared between modules.
    • Implement misc.CovMatrix.
    "},{"location":"releases/0.10.0/#reco","title":"reco","text":"
    • Renamed the Recommender base class into Ranker.
    • Added a rank method to each recommender.
    • Removed reco.SurpriseWrapper as it wasn't really useful.
    • Added an is_contextual property to each ranker to indicate if a model makes use of contextual features or not.
    "},{"location":"releases/0.10.0/#stats","title":"stats","text":"
    • stats.Mean, stats.Var, and stats.Cov each now have an update_many method which accepts numpy arrays.
    "},{"location":"releases/0.10.0/#utils","title":"utils","text":"
    • Removed utils.Window and use collections.deque instead where necessary.
    "},{"location":"releases/0.10.1/","title":"0.10.1 - 2022-02-05","text":""},{"location":"releases/0.10.1/#evaluate","title":"evaluate","text":"

    evaluate.progressive_val_score can now handle models which use **kwargs in their learn_one and predict_one methods. For instance, this is useful for reco.Ranker models which require passing a user and an item.

    "},{"location":"releases/0.11.0/","title":"0.11.0 - 2022-05-28","text":"
    • Moved all metrics in metrics.cluster except metrics.Silhouette to river-extra.
    "},{"location":"releases/0.11.0/#anomaly","title":"anomaly","text":"
• There is now an anomaly.base.SupervisedAnomalyDetector base class for supervised anomaly detection.
• Added anomaly.GaussianScorer, which is the first supervised anomaly detector.
• There is now an anomaly.base.AnomalyFilter base class for anomaly filtering methods. These allow anomaly scores to be classified. They can also prevent models from learning on anomalous data, for instance by putting them as an initial step of a pipeline.
• Added anomaly.ConstantFilter and anomaly.QuantileFilter, which are the first anomaly filters.
    • Removed anomaly.ConstantThresholder and anomaly.QuantileThresholder, as they overlap with the new anomaly filtering mechanism.
    "},{"location":"releases/0.11.0/#base","title":"base","text":"
    • Fixed an issue where the _raw_memory_usage property would spin into an infinite loop if a model's property was an itertools.count.
    "},{"location":"releases/0.11.0/#dataset","title":"dataset","text":"
    • Added the datasets.WaterFlow dataset.
    "},{"location":"releases/0.11.0/#dist","title":"dist","text":"
• A revert method has been added to proba.Gaussian.
• A revert method has been added to proba.Multinomial.
• Added proba.TimeRolling to measure probability distributions over windows of time.
    "},{"location":"releases/0.11.0/#drift","title":"drift","text":"
• Added the PeriodicTrigger detector, a baseline capable of producing drift signals at regular or random intervals.
• The numpy usage was removed from drift.KSWIN in favor of collections.deque. Appending or deleting elements to numpy arrays implies creating another object.
    • Added the seed parameter to drift.KSWIN to control reproducibility.
    • The Kolmogorov-Smirnov test mode was changed to the default (\"auto\") to suppress warnings (drift.KSWIN).
    • Unnecessary usage of numpy was also removed in other concept drift detectors.
    "},{"location":"releases/0.11.0/#ensemble","title":"ensemble","text":"
    • Streamline SRP{Classifier,Regressor}, remove unneeded numpy usage, make SRP variants robust against missing features, and fix bugs.
• Remove unneeded numpy usage in AdaptiveRandomForest{Classifier,Regressor}.
    "},{"location":"releases/0.11.0/#evaluate","title":"evaluate","text":"
• Added an iter_progressive_val_score function, which does the same as progressive_val_score, except that it yields rather than prints results at each step, which gives more control to the user.
    "},{"location":"releases/0.11.0/#imblearn","title":"imblearn","text":"
    • Added imblearn.ChebyshevUnderSampler and imblearn.ChebyshevOverSampler for imbalanced regression.
    "},{"location":"releases/0.11.0/#linear_model","title":"linear_model","text":"
    • linear_model.LinearRegression and linear_model.LogisticRegression now correctly apply the l2 regularization when their learn_many method is used.
• Added l1 regularization (implementation with cumulative penalty, see paper) for linear_model.LinearRegression and linear_model.LogisticRegression.
    "},{"location":"releases/0.11.0/#neighbors","title":"neighbors","text":"
    • neighbors.KNNADWINClassifier and neighbors.SAMKNNClassifier have been deprecated.
    • Introduced neighbors.NearestNeighbors for searching nearest neighbors.
    • Vastly refactored and simplified the nearest neighbors logic.
    "},{"location":"releases/0.11.0/#proba","title":"proba","text":"
    • Added proba.Rolling to measure a probability distribution over a window.
    "},{"location":"releases/0.11.0/#rules","title":"rules","text":"
    • AMRules's debug_one explicitly indicates the prediction strategy used by each rule.
    • Fix bug in debug_one (AMRules) where prediction explanations were incorrectly displayed when ordered_rule_set=True.
    "},{"location":"releases/0.11.0/#time_series","title":"time_series","text":"
    • Added an iter_evaluate function to trace the evaluation at each sample in a dataset.
    "},{"location":"releases/0.11.0/#tree","title":"tree","text":"
    • Fix bug in Naive Bayes-based leaf prediction.
    • Remove unneeded numpy usage in HoeffdingAdaptiveTree{Classifier,Regressor}.
    "},{"location":"releases/0.11.0/#stats","title":"stats","text":"
    • A revert method has been added to stats.Var.
    "},{"location":"releases/0.11.1/","title":"0.11.1 - 2022-06-06","text":"

    A small release to introduce benchmarks.

    "},{"location":"releases/0.11.1/#anomaly","title":"anomaly","text":"
    • Fixed a bug where anomaly filters were never updated.
    "},{"location":"releases/0.12.0/","title":"0.12.0 - 2022-09-02","text":"
    • Moved all the public modules imports from river/__init__.py to river/api.py and removed unnecessary dependencies between modules enabling faster cherry-picked import times (~3x).
    • Adding wheels for Python 3.11.
    "},{"location":"releases/0.12.0/#base","title":"base","text":"
• Introduced a mutate method to the base.Base class. This allows setting attributes in a controlled manner, which paves the way for online AutoML. See the recipe for more information.
    "},{"location":"releases/0.12.0/#compat","title":"compat","text":"
    • Moved the PyTorch wrappers to river-extra.
    "},{"location":"releases/0.12.0/#covariance","title":"covariance","text":"
    • Created a new covariance module to hold everything related to covariance and inversion covariance matrix estimation.
    • Moved misc.CovarianceMatrix to covariance.EmpiricalCovariance.
    • Added covariance.EmpiricalPrecision to estimate the inverse covariance matrix.
    "},{"location":"releases/0.12.0/#compose","title":"compose","text":"
    • Moved utils.pure_inference_mode to compose.pure_inference_mode and utils.warm_up_mode to compose.warm_up_mode.
    • Pipeline parts can now be accessed by integer positions as well as by name.
    "},{"location":"releases/0.12.0/#datasets","title":"datasets","text":"
• Imports synth, enabling from river import datasets; datasets.synth.
    "},{"location":"releases/0.12.0/#drift","title":"drift","text":"
• Refactor the concept drift detectors to match the rest of River's API. Warnings are only issued by detectors that support this feature.
• Drifts can be assessed via the drift_detected property. Warning signals can be accessed via the warning_detected property. The update method now returns self.
    • Ensure all detectors automatically reset their inner states after a concept drift detection.
    • Streamline DDM, EDDM, HDDM_A, and HDDM_W. Make the configurable parameters names match their respective papers.
    • Fix bugs in EDDM and HDDM_W.
    • Enable two-sided tests in PageHinkley.
    • Improve documentation and update tests.
    "},{"location":"releases/0.12.0/#feature_extraction","title":"feature_extraction","text":"
    • Added a tokenizer_pattern parameter to feature_extraction.BagOfWords and feature_extraction.TFIDF to override the default pattern used for tokenizing text.
    • Added a stop_words parameter to feature_extraction.BagOfWords and feature_extraction.TFIDF for removing stop words once the text has been tokenized.
    "},{"location":"releases/0.12.0/#linear_model","title":"linear_model","text":"
    • After long ado, we've finally implemented linear_model.BayesianLinearRegression.
    "},{"location":"releases/0.12.0/#metrics","title":"metrics","text":"
    • Removed dependency to optim.
    • Removed metrics.Rolling, due to the addition of utils.Rolling.
    • Removed metrics.TimeRolling, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#proba","title":"proba","text":"
    • Removed proba.Rolling, due to the addition of utils.Rolling.
    • Removed proba.TimeRolling, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#rule","title":"rule","text":"
    • The default splitter was changed to tree.splitter.TEBST for memory and running time efficiency.
    "},{"location":"releases/0.12.0/#stats","title":"stats","text":"
    • Removed stats.RollingMean, due to the addition of utils.Rolling.
    • Removed stats.RollingVar, due to the addition of utils.Rolling.
    • Removed stats.RollingCov, due to the addition of utils.Rolling.
    • Removed stats.RollingPearsonCorr, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#stream","title":"stream","text":"
    • stream.iter_array now handles text data.
    • Added stream.TwitterLiveStream, to listen to a filtered live stream of Tweets.
    "},{"location":"releases/0.12.0/#time_series","title":"time_series","text":"
    • Added time_series.HorizonAggMetric.
    • Fixed a bug in time_series.SNARIMAX where the number of seasonal components was not correct when sp or sq were specified.
    • Fixed the differencing logic in time_series.SNARIMAX when d or sd were specified.
    "},{"location":"releases/0.12.0/#tree","title":"tree","text":"
    • Rename split_confidence and tie_threshold to delta and tau, respectively. This way, the parameters are not misleading and match what the research papers have used for decades.
    • Refactor HoeffdingAdaptiveTree{Classifier,Regressor} to allow the usage of any drift detector. Expose the significance level of the test used to switch between subtrees as a user-defined parameter.
    • Correct test used to switch between foreground and background subtrees in HoeffdingAdaptiveTreeRegressor. Due to the continuous and unbounded nature of the monitored errors, a z-test is now performed to decide which subtree to keep.
    • The default leaf_prediction value was changed to \"adaptive\", as this often results in the smallest errors in practice.
    • The default splitter was changed to tree.splitter.TEBST for memory and running time efficiency.
    "},{"location":"releases/0.12.0/#utils","title":"utils","text":"
    • Removed dependencies to anomaly and compose.
    • Added utils.Rolling and utils.TimeRolling, which are generic wrappers for computing over a window (of time).
    • Use binary search to speed-up element removal in utils.SortedWindow.
    "},{"location":"releases/0.12.1/","title":"0.12.1 - 2022-09-02","text":""},{"location":"releases/0.12.1/#base","title":"base","text":"
    • Fix the way the clone method handles positional arguments.
    "},{"location":"releases/0.13.0/","title":"0.13.0 - 2022-09-15","text":""},{"location":"releases/0.13.0/#compose","title":"compose","text":"
    • compose.TransformerUnion parts can now be accessed by index as well as by name.
    "},{"location":"releases/0.13.0/#stats","title":"stats","text":"
    • Added the LossyCount for tracking frequent itemsets. This implementation also supports a forgetting factor to reduce the influence of old elements.
    • The following statistics are now implemented in Rust:
    • Quantile
    • EWMean
    • EWVar
    • IQR
    • Kurtosis
• PeakToPeak
    • Skew
    • RollingQuantile
    • RollingIQR
    "},{"location":"releases/0.13.0/#stream","title":"stream","text":"
    • Implemented stream.TwitchChatStream.
    "},{"location":"releases/0.14.0/","title":"0.14.0 - 2022-10-26","text":"
    • Introducing the bandit module for running multi-armed bandits
    • Introducing the sketch module with summarization tools and data sketches working in a streaming fashion!
    "},{"location":"releases/0.14.0/#bandit","title":"bandit","text":"
    • Added bandit.EpsilonGreedy.
    • Added bandit.UCB.
• Added bandit.ThompsonSampling.
    • Added a bandit.base module.
    • Added bandit.envs.CandyCaneContest, which implements the Gym interface.
    • Added bandit.envs.KArmedTestbed, which implements the Gym interface.
    • Added bandit.evaluate for basic benchmarking of bandit policies on a Gym environment.
    "},{"location":"releases/0.14.0/#drift","title":"drift","text":"
    • Exposed more parameters in ADWIN: clock, max_buckets, min_window_length, and grace_period.
    "},{"location":"releases/0.14.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.BanditRegressor, which is a generic model selection method that works with any bandit policy.
    • Removed model_selection.EpsilonGreedyRegressor due to the addition of model_selection.BanditRegressor.
    • Removed model_selection.UCBRegressor due to the addition of model_selection.BanditRegressor.
    "},{"location":"releases/0.14.0/#proba","title":"proba","text":"
    • Added proba.Beta.
    • Added a sample method to each distribution.
    • Added a mode property to each distribution.
    • Replaced the pmf and pdf methods with a __call__ method.
    "},{"location":"releases/0.14.0/#sketch","title":"sketch","text":"
    • Moved misc.Histogram to sketch.Histogram.
    • Moved stats.LossyCount to sketch.HeavyHitters and update its API to better match collections.Counter.
    • Added missing return self in HeavyHitters.
    • Added the Count-Min Sketch (sketch.Counter) algorithm for approximate element counting.
    • Added an implementation of Bloom filter (sketch.Set) to provide approximate set-like operations.
    "},{"location":"releases/0.15.0/","title":"0.15.0 - 2023-01-29","text":""},{"location":"releases/0.15.0/#active","title":"active","text":"
    • Created this module dedicated to online active learning.
• Added active.EntropySampler.
    "},{"location":"releases/0.15.0/#base","title":"base","text":"
• Fixed an issue where an estimator that has a pipeline as an attribute could not be cloned.
    • Added a base.DriftAndWarningDetector to clarify the difference between drift detectors that have a warning_detected property and those that don't.
    • Added MultiLabelClassifier.
    • Added MultiTargetRegressor.
    • Added drift.BinaryDriftDetector.
    • Added drift.BinaryDriftAndWarningDetector.
    "},{"location":"releases/0.15.0/#conf","title":"conf","text":"
    • Introduced this new module to perform conformal predictions.
    • Added a conf.Interval dataclass to represent predictive intervals.
    • Added conf.RegressionJackknife.
    "},{"location":"releases/0.15.0/#datasets","title":"datasets","text":"
    • Removed unnecessary Numpy usage in the synth submodule.
    • Changed np.random.RandomState to np.random.default_rng where necessary.
    "},{"location":"releases/0.15.0/#drift","title":"drift","text":"
    • Added drift.DriftRetrainingClassifier.
    • Renamed drift.PeriodicTrigger to drift.DummyDriftDetector to clarify it is a naive baseline.
    • Created a binary submodule to organize all drift detectors which only apply to binary inputs.
    "},{"location":"releases/0.15.0/#ensemble","title":"ensemble","text":"
    • Added ensemble.ADWINBoostingClassifier.
    • Added ensemble.BOLEClassifier.
    "},{"location":"releases/0.15.0/#evaluate","title":"evaluate","text":"
    • evaluate.progressive_val_score and evaluate.iter_progressive_val_score will now also produce a report once the last sample has been processed, in addition to every print_every steps.
    "},{"location":"releases/0.15.0/#feature_extraction","title":"feature_extraction","text":"
    • feature_extraction.BagOfWords now outputs a dictionary, and not a collections.Counter.
    "},{"location":"releases/0.15.0/#forest","title":"forest","text":"
    • Created this new module to host all models based on an ensemble of decision trees.
    • Moved ensemble.AdaptiveRandomForestClassifier to forest.ARFClassifier.
    • Moved ensemble.AdaptiveRandomForestRegressor to forest.ARFRegressor.
    • Added forest.AMFClassifier.
    • Added forest.OXTRegressor.
    "},{"location":"releases/0.15.0/#linear_model","title":"linear_model","text":"
    • Renamed use_dist to with_dist in linear_model.BayesianLinearRegression's predict_one method.
    "},{"location":"releases/0.15.0/#multiclass","title":"multiclass","text":"
    • Added a coding_method method to multiclass.OCC to control how the codes are randomly generated.
    "},{"location":"releases/0.15.0/#multioutput","title":"multioutput","text":"
    • Added MultiClassEncoder to convert multi-label tasks into multi-class problems.
    "},{"location":"releases/0.15.0/#preprocessing","title":"preprocessing","text":"
    • Renamed alpha to fading_factor in preprocessing.AdaptiveStandardScaler.
    "},{"location":"releases/0.15.0/#rules","title":"rules","text":"
    • Renamed alpha to fading_factor in rules.AMRules.
    "},{"location":"releases/0.15.0/#sketch","title":"sketch","text":"
    • Renamed alpha to fading_factor in sketch.HeavyHitters.
    "},{"location":"releases/0.15.0/#stats","title":"stats","text":"
    • Renamed alpha to fading_factor in stats.Entropy.
    • Renamed alpha to fading_factor in stats.EWMean.
    • Renamed alpha to fading_factor in stats.EWVar.
    "},{"location":"releases/0.15.0/#stream","title":"stream","text":"
    • Upgraded stream.iter_sql to SQLAlchemy 2.0.
    "},{"location":"releases/0.15.0/#tree","title":"tree","text":"
• Removed LabelCombinationHoeffdingTreeClassifier. New code should use multioutput.MultiClassEncoder instead.
    "},{"location":"releases/0.15.0/#utils","title":"utils","text":"
    • Removed artifacts from the merger.
    "},{"location":"releases/0.16.0/","title":"0.16.0 - 2023-05-08","text":"

    Added wheels for Python 3.11.

    "},{"location":"releases/0.16.0/#feature_extraction","title":"feature_extraction","text":"
• feature_extraction.Agg and feature_extraction.TargetAgg can now be passed an optional t in their learn_one methods, which allows them to work with utils.TimeRolling.
    "},{"location":"releases/0.16.0/#metrics","title":"metrics","text":"
    • Added metrics.MAPE.
    • Added metrics.RollingROCAUC.
    "},{"location":"releases/0.16.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.GaussianRandomProjector.
    • Added preprocessing.SparseRandomProjector.
    "},{"location":"releases/0.16.0/#stats","title":"stats","text":"
    • Fixed randomness issue with the first few outputs of stats.Quantile.
    "},{"location":"releases/0.17.0/","title":"0.17.0 - 2023-05-27","text":""},{"location":"releases/0.17.0/#bandit","title":"bandit","text":"
• Bandit policies now return a single arm when the pull method is called, instead of yielding one or more arms at a time. This is simpler to understand. We will move back to multi-armed pulls in the future.
    • Added bandit.Exp3.
    • bandit.UCB and bandit.Exp3 have an extra reward_scaler parameter, which can be any object that inherits from compose.TargetTransformRegressor. This allows scaling rewards before updating arms.
    "},{"location":"releases/0.17.0/#compose","title":"compose","text":"
    • compose.TransformerProduct now correctly returns a compose.TransformerUnion when a transformer is added to it.
    • Fixed compose.TransformerProduct's transform_many behavior.
    • compose.TransformerUnion and compose.TransformerProduct will now clone the provided estimators, so that shallow copies aren't shared in different places.
    "},{"location":"releases/0.17.0/#model_selection","title":"model_selection","text":"
• Added model_selection.BanditClassifier, which is the classification equivalent of model_selection.BanditRegressor. Both are methods to perform online model selection via a bandit policy.
    "},{"location":"releases/0.17.0/#multioutput","title":"multioutput","text":"
    • metrics.multioutput.MacroAverage and metrics.multioutput.MicroAverage now loop over the keys of y_true instead of y_pred. This ensures a KeyError is correctly raised if y_pred is missing an output that is present in y_true.
    "},{"location":"releases/0.17.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.TargetMinMaxScaler, which operates the same as preprocessing.TargetStandardScaler, but instead uses min-max scaling.
    "},{"location":"releases/0.18.0/","title":"0.18.0 - 2023-06-26","text":""},{"location":"releases/0.18.0/#bandit","title":"bandit","text":"
    • Added bandit.BayesUCB.
    • Added bandit.evaluate_offline, for evaluating bandits on historical (logged) data.
    "},{"location":"releases/0.18.0/#cluster","title":"cluster","text":"
• cluster.DBSTREAM will now only recluster on demand, rather than at every call to learn_one.
    "},{"location":"releases/0.18.0/#compat","title":"compat","text":"
• The predict_many method of scikit-learn models wrapped with compat.convert_sklearn_to_river raised an exception if the model had not been fitted on any data yet. Instead, default predictions will now be produced, which is consistent with the rest of River.
    • compat.SKL2RiverRegressor and compat.SKL2RiverClassifier didn't check whether features were ordered in the same way at each method call. They now store the list of feature names at the first function call, and align subsequent inputs in the same order.
    "},{"location":"releases/0.18.0/#compose","title":"compose","text":"
    • compose.TransformerProduct will now preserve the density of sparse columns.
    • Added a transform_many method to compose.FuncTransformer, allowing it to be used in mini-batch pipelines.
    • The compose.pure_inference_mode now works with mini-batching.
    "},{"location":"releases/0.18.0/#neighbors","title":"neighbors","text":"
    • Added neighbors.SWINN to power-up approximate nearest neighbor search. SWINN uses graphs to speed up nearest neighbor search in large sliding windows of data.
    • Renamed neighbors.NearestNeighbors to neighbors.LazySearch.
    • Standardize and create base classes for generic nearest neighbor search utilities.
    • The user can now select the nearest neighbor search engine to use in neighbors.KNNClassifier and neighbors.KNNRegressor.
    "},{"location":"releases/0.18.0/#preprocessing","title":"preprocessing","text":"
• Renamed the sparse parameter to drop_zeros in preprocessing.OneHotEncoder.
    • The transform_many method of preprocessing.OneHotEncoder will now return a sparse dataframe, rather than a dense one, which will consume much less memory.
    "},{"location":"releases/0.18.0/#proba","title":"proba","text":"
    • Added a cdf method to proba.Beta.
    "},{"location":"releases/0.18.0/#tree","title":"tree","text":"
• Exposed the min_branch_fraction parameter to avoid splits where most of the data goes to a single branch. This affects classification trees.
• Added the max_share_to_split parameter to Hoeffding Tree classifiers. This parameter avoids splitting when the majority class has most of the data.
    "},{"location":"releases/0.18.0/#utils","title":"utils","text":"
    • Fixed utils.math.minkowski_distance.
    "},{"location":"releases/0.19.0/","title":"0.19.0 - 2023-08-02","text":"

Calling learn_one in a pipeline will now update each part of the pipeline in turn. Before, the unsupervised parts of the pipeline were updated during predict_one. This is more intuitive for new users. The old behavior, which yields better results, can be restored by calling learn_one with the new compose.learn_during_predict context manager.
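A minimal sketch of the difference, using a simple scaler-plus-regressor pipeline (the feature values are made up for illustration):

from river import compose, linear_model, preprocessing

model = compose.Pipeline(
    preprocessing.StandardScaler(),
    linear_model.LinearRegression(),
)

# 0.19.0 behavior: learn_one updates every step, supervised or not.
model.learn_one({"x": 1.0}, 2.0)

# The old behavior, where unsupervised steps also learn during prediction,
# can be restored with the context manager:
with compose.learn_during_predict():
    model.predict_one({"x": 1.0})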

    "},{"location":"releases/0.19.0/#bandit","title":"bandit","text":"
    • Added a bandit.datasets submodule, which is meant to contain contextual bandit datasets.
    • Added bandit.base.ContextualPolicy.
    • Added bandit.datasets.NewsArticles.
    • Added bandit.LinUCBDisjoint, which is River's first contextual bandit policy.
    • Added bandit.RandomPolicy.
    "},{"location":"releases/0.19.0/#compose","title":"compose","text":"
    • Removed the compose.warm_up_mode context manager.
    • Removed the compose.pure_inference_mode context manager.
    • The last step of a pipeline will be correctly updated if it is unsupervised, which wasn't the case before.
    • Fixed an edge-case where compose.TransformerProduct would not work when chained more than twice.
    "},{"location":"releases/0.19.0/#drift","title":"drift","text":"
    • Added a datasets submodule, which contains datasets that are useful for concept drift experiments.
• Fixed bugs in drift.binary.HDDM_A and drift.binary.HDDM_W.
    "},{"location":"releases/0.19.0/#linear_model","title":"linear_model","text":"
    • Added a predict_many method to linear_model.BayesianLinearRegression.
    • Added a smoothing parameter to linear_model.BayesianLinearRegression, which allows it to cope with concept drift.
    "},{"location":"releases/0.19.0/#forest","title":"forest","text":"
    • Fixed issue with forest.ARFClassifier which couldn't be passed a CrossEntropy metric.
• Fixed a bug in forest.AMFClassifier which slightly improves predictive accuracy.
    • Added forest.AMFRegressor.
    "},{"location":"releases/0.19.0/#multioutput","title":"multioutput","text":"
    • Added metrics.multioutput.SampleAverage, which is equivalent to using average='samples' in scikit-learn.
    "},{"location":"releases/0.19.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.OrdinalEncoder, to map string features to integers.
    • The transform_many method of preprocessing.StandardScaler now uses the dtype of the input for the output.
    "},{"location":"releases/0.19.0/#proba","title":"proba","text":"
    • Added proba.MultivariateGaussian.
    "},{"location":"releases/0.19.0/#stream","title":"stream","text":"
    • stream.iter_arff now supports sparse data.
    • stream.iter_arff now supports multi-output targets.
    • stream.iter_arff now supports missing values indicated with question marks.
    "},{"location":"releases/0.19.0/#utils","title":"utils","text":"
    • Added utils.random.exponential to retrieve random samples following an exponential distribution.
    "},{"location":"releases/0.2.0/","title":"0.2.0 - 2019-05-27","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.2.0/#compose","title":"compose","text":"
• compose.Pipeline now has a debug_one method.
    • compose.Discard and compose.Select now take variadic inputs, which means you don't have to provide a list of features to exclude/include.
    "},{"location":"releases/0.2.0/#datasets","title":"datasets","text":"
    • Added datasets.fetch_bikes
    "},{"location":"releases/0.2.0/#feature_extraction","title":"feature_extraction","text":"
    • Classes that inherit from feature_extraction.VectorizerMixin can now directly be passed str instances instead of dict instances.
    • feature_extraction.Agg and feature_extraction.TargetAgg can now aggregate on multiple attributes.
    "},{"location":"releases/0.2.0/#metrics","title":"metrics","text":"
    • Added RollingAccuracy
    • Added RollingCrossEntropy
    • Added RollingF1
    • Added RollingLogLoss
    • Added RollingMacroF1
    • Added RollingMacroPrecision
    • Added RollingMacroRecall
    • Added RollingMAE
    • Added RollingMicroF1
    • Added RollingMicroPrecision
    • Added RollingMicroRecall
    • Added RollingMSE
    • Added RollingPrecision
    • Added RollingRecall
    • Added RollingRMSE
    • Added RollingRMSLE
    • Added RollingSMAPE
    "},{"location":"releases/0.2.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.online_qa_score.
    "},{"location":"releases/0.2.0/#proba","title":"proba","text":"

The dist module has been renamed to proba and is now public. For the moment, it contains a single distribution, proba.Gaussian.

    "},{"location":"releases/0.2.0/#naive_bayes","title":"naive_bayes","text":"
    • Added naive_bayes.BernoulliNB.
    • Added naive_bayes.ComplementNB.
    "},{"location":"releases/0.2.0/#optim","title":"optim","text":"
    • Added optim.AdaBound.
    "},{"location":"releases/0.2.0/#tree","title":"tree","text":"
    • Added tree.DecisionTreeClassifier.
    • Removed tree.MondrianTreeClassifier and tree.MondrianTreeRegressor because their performance wasn't good enough.
    "},{"location":"releases/0.2.0/#stats","title":"stats","text":"
    • Added stats.AutoCorrelation.
    • Added stats.EWVar.
• Renamed stats.Variance to stats.Var and stats.RollingVariance to stats.RollingVar.
    "},{"location":"releases/0.2.0/#stream","title":"stream","text":"
    • Added stream.simulate_qa.
    "},{"location":"releases/0.2.0/#utils","title":"utils","text":"
    • Added utils.SDFT.
    • Added utils.Skyline.
    • Renamed the window_size parameter to size in utils.Window and utils.SortedWindow.
    "},{"location":"releases/0.3.0/","title":"0.3.0 - 2019-06-23","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.3.0/#datasets","title":"datasets","text":"
    • Added datasets.load_chick_weights.
    "},{"location":"releases/0.3.0/#decomposition","title":"decomposition","text":"
    • Added decomposition.LDA.
    "},{"location":"releases/0.3.0/#ensemble","title":"ensemble","text":"
    • Added ensemble.HedgeRegressor.
    • Added ensemble.StackingBinaryClassifier.
    "},{"location":"releases/0.3.0/#metrics","title":"metrics","text":"
    • Added metrics.FBeta
    • Added metrics.MacroFBeta
    • Added metrics.MicroFBeta
    • Added metrics.MultiFBeta
    • Added metrics.RollingFBeta
    • Added metrics.RollingMacroFBeta
    • Added metrics.RollingMicroFBeta
    • Added metrics.RollingMultiFBeta
    • Added metrics.Jaccard
    • Added metrics.RollingConfusionMatrix
    • Added metrics.RegressionMultiOutput
    • Added metrics.MCC
    • Added metrics.RollingMCC
    • Added metrics.ROCAUC
    • Renamed metrics.F1Score to metrics.F1.
    "},{"location":"releases/0.3.0/#multioutput","title":"multioutput","text":"
    • Added multioutput.ClassifierChain.
    • Added multioutput.RegressorChain.
    "},{"location":"releases/0.3.0/#optim","title":"optim","text":"
    • Added optim.QuantileLoss
    • Added optim.MiniBatcher.
    "},{"location":"releases/0.3.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.Normalizer.
    "},{"location":"releases/0.3.0/#proba","title":"proba","text":"
    • Added proba.Multinomial.
    "},{"location":"releases/0.4.1/","title":"0.4.1 - 2019-10-23","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.4.1/#base","title":"base","text":"
    • Tests are now much more extensive, thanks mostly to the newly added estimator tags.
    "},{"location":"releases/0.4.1/#compose","title":"compose","text":"
    • Added compose.Renamer.
    "},{"location":"releases/0.4.1/#datasets","title":"datasets","text":"
    • Added fetch_kdd99_http.
    • Added fetch_sms.
    • Added fetch_trec07p.
    "},{"location":"releases/0.4.1/#ensemble","title":"ensemble","text":"
• Removed ensemble.HedgeBinaryClassifier because its performance was subpar.
    • Removed ensemble.GroupRegressor, as this should be a special case of ensemble.StackingRegressor.
    "},{"location":"releases/0.4.1/#feature_extraction","title":"feature_extraction","text":"
    • Fixed a bug where feature_extraction.CountVectorizer and feature_extraction.TFIDFVectorizer couldn't be pickled.
    "},{"location":"releases/0.4.1/#linear_model","title":"linear_model","text":"
    • linear_model.LogisticRegression and linear_model.LinearRegression now have an intercept_lr parameter.
    "},{"location":"releases/0.4.1/#metrics","title":"metrics","text":"
• Metrics can now be composed using the + operator, which is useful for evaluating multiple metrics at the same time (see the sketch after this list).
    • Added metrics.Rolling, which eliminates the need for a specific rolling implementation for each metric.
    • Each metric can now be passed a sample_weight argument.
    • Added metrics.WeightedF1.
    • Added metrics.WeightedFBeta.
    • Added metrics.WeightedPrecision.
    • Added metrics.WeightedRecall.
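A minimal sketch of composing metrics with the + operator (the values are made up for illustration):

from river import metrics

metric = metrics.Precision() + metrics.Recall()

# Each update feeds every metric in the composition.
metric.update(True, True)
metric.update(False, True)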
    "},{"location":"releases/0.4.1/#neighbors","title":"neighbors","text":"
    • Added neighbors.KNeighborsRegressor.
    • Added neighbors.KNeighborsClassifier.
    "},{"location":"releases/0.4.1/#optim","title":"optim","text":"
    • Added optim.AdaMax.
    • The optim module has been reorganized into submodules; namely optim.schedulers, optim.initializers, and optim.losses. The top-level now only contains optimizers. Some classes have been renamed accordingly. See the documentation for details.
    • Renamed optim.VanillaSGD to optim.SGD.
    "},{"location":"releases/0.4.1/#stats","title":"stats","text":"
    • Added stats.IQR.
    • Added stats.RollingIQR.
    • Cythonized stats.Mean and stats.Var.
    "},{"location":"releases/0.4.1/#stream","title":"stream","text":"
    • Added stream.shuffle.
    • stream.iter_csv now has fraction and seed parameters to sample rows, deterministically or not.
    • Renamed stream.iter_numpy to stream.iter_array.
    • stream.iter_csv can now read from gzipped files.
    "},{"location":"releases/0.4.1/#time_series","title":"time_series","text":"
    • time_series.Detrender now has a window_size parameter for detrending with a rolling mean.
    "},{"location":"releases/0.4.1/#tree","title":"tree","text":"
    • Added tree.RandomForestClassifier.
    "},{"location":"releases/0.4.1/#utils","title":"utils","text":"
    • Fixed a bug where utils.dot could take longer than necessary.
    "},{"location":"releases/0.4.3/","title":"0.4.3 - 2019-10-27","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.4.3/#base","title":"base","text":"
• Models that inherit from base.Wrapper (e.g. tree.RandomForestClassifier) can now be pickled.
    "},{"location":"releases/0.4.3/#datasets","title":"datasets","text":"
    • Added datasets.fetch_credit_card.
    "},{"location":"releases/0.4.3/#utils","title":"utils","text":"
    • Added the utils.math sub-module.
    "},{"location":"releases/0.4.3/#tree","title":"tree","text":"
    • Fixed the debug_one method of tree.DecisionTreeClassifier.
    "},{"location":"releases/0.4.4/","title":"0.4.4 - 2019-11-11","text":"
    • PyPI
    • GitHub

This release was mainly made to provide access to wheels (https://pythonwheels.com/) for Windows and macOS.

    "},{"location":"releases/0.4.4/#ensemble","title":"ensemble","text":"
    • Added ensemble.AdaBoostClassifier.
    "},{"location":"releases/0.4.4/#linear_model","title":"linear_model","text":"
    • Added a clip_gradient parameter to linear_model.LinearRegression and linear_model.LogisticRegression. Gradient clipping was already implemented, but the maximum absolute value can now be set by the user.
    • The intercept_lr parameter of linear_model.LinearRegression and linear_model.LogisticRegression can now be passed an instance of optim.schedulers.Scheduler as well as a float.
    "},{"location":"releases/0.4.4/#metrics","title":"metrics","text":"
    • Fixed metrics.SMAPE, the implementation was missing a multiplication by 2.
    "},{"location":"releases/0.4.4/#optim","title":"optim","text":"
• Added optim.schedulers.Optimal, which produces results that are identical to sklearn.linear_model.SGDRegressor and sklearn.linear_model.SGDClassifier when setting their learning_rate parameter to 'optimal'.
    "},{"location":"releases/0.4.4/#time_series","title":"time_series","text":"
    • Added time_series.SNARIMAX, a generic model which encompasses well-known time series models such as ARIMA and NARX.
    "},{"location":"releases/0.5.0/","title":"0.5.0 - 2020-03-13","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.5.0/#compat","title":"compat","text":"
    • Added compat.PyTorch2CremeRegressor.
    • compat.SKL2CremeRegressor and compat.SKL2CremeClassifier now have an optional batch_size parameter in order to perform mini-batching.
    "},{"location":"releases/0.5.0/#compose","title":"compose","text":"
    • Renamed compose.Whitelister to compose.Select.
    • Renamed compose.Blacklister to compose.Discard.
    "},{"location":"releases/0.5.0/#facto","title":"facto","text":"
    • Added facto.FFMClassifier.
    • Added facto.FFMRegressor.
    • Added facto.FwFMClassifier.
    • Added facto.FwFMRegressor.
    • Added facto.HOFMClassifier.
    • Added facto.HOFMRegressor.
    • Refactored facto.FMClassifier.
    • Refactored facto.FMRegressor.
    "},{"location":"releases/0.5.0/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.PoissonInclusion.
    • Removed feature_selection.RandomDiscarder as it didn't make much sense.
    "},{"location":"releases/0.5.0/#feature_extraction","title":"feature_extraction","text":"
    • Renamed feature_extraction.CountVectorizer to feature_extraction.BagOfWords.
    • Renamed feature_extraction.TFIDFVectorizer to feature_extraction.TFIDF.
    • Added preprocessor and ngram_range parameters to feature_extraction.BagOfWords.
    • Added preprocessor and ngram_range parameters to feature_extraction.TFIDF.
    "},{"location":"releases/0.5.0/#datasets","title":"datasets","text":"
    • The datasets module has been overhauled. Each dataset is now a class (e.g. fetch_electricity has become datasets.Elec2).
    • Added datasets.TrumpApproval.
    • Added datasets.MaliciousURL.
    • Added datasets.gen.SEA.
    • Added datasets.Higgs.
    • Added datasets.MovieLens100K.
    • Added datasets.Bananas.
    • Added datasets.Taxis.
    • Added datasets.ImageSegments.
    • Added datasets.SMTP
    "},{"location":"releases/0.5.0/#impute","title":"impute","text":"
    • Added impute.PreviousImputer.
    "},{"location":"releases/0.5.0/#linear_model","title":"linear_model","text":"
    • linear_model.FMClassifier has been moved to the facto module.
    • linear_model.FMRegressor has been moved to the facto module.
    • Added linear_model.ALMAClassifier.
    "},{"location":"releases/0.5.0/#metrics","title":"metrics","text":"
    • Added metrics.ClassificationReport.
    • Added metrics.TimeRolling.
    • The implementation of metrics.ROCAUC was incorrect. Using the trapezoidal rule instead of Simpson's rule seems to be more robust.
    • metrics.PerClass has been removed; it is recommended that you use metrics.ClassificationReport instead as it gives a better overview.
    "},{"location":"releases/0.5.0/#meta","title":"meta","text":"
    • Moved meta.TransformedTargetRegressor and meta.BoxCoxRegressor to this module (they were previously in the compose module).
    • Added meta.PredClipper
    "},{"location":"releases/0.5.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.expand_param_grid to generate a list of models from a grid of parameters.
    • Added the model_selection.successive_halving method for selecting hyperparameters.
    • The online_score and online_qa_score methods have been merged into a single method named model_selection.progressive_val_score.
    "},{"location":"releases/0.5.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.RBFSampler.
    • Added preprocessing.MaxAbsScaler.
    • Added preprocessing.RobustScaler.
    • Added preprocessing.Binarizer.
    • Added with_mean and with_std parameters to preprocessing.StandardScaler.
    "},{"location":"releases/0.5.0/#optim","title":"optim","text":"
    • Added optim.losses.BinaryFocalLoss.
    • Added the optim.AMSGrad optimizer.
    • Added the optim.Nadam optimizer.
    • Added optim.losses.Poisson.
    • Fixed a performance bug in optim.NesterovMomentum.
    "},{"location":"releases/0.5.0/#reco","title":"reco","text":"
    • Added reco.FunkMF.
    • Renamed reco.SVD to reco.BiasedMF.
    • Renamed reco.SGDBaseline to reco.Baseline.
    • Models now expect a dict input with user and item fields.
    "},{"location":"releases/0.5.0/#sampling","title":"sampling","text":"
    • Added sampling.RandomUnderSampler.
    • Added sampling.RandomOverSampler.
    • Added sampling.RandomSampler.
    • Added sampling.HardSamplingClassifier.
    • Added sampling.HardSamplingRegressor.
    "},{"location":"releases/0.5.0/#stats","title":"stats","text":"
    • Added stats.AbsMax.
    • Added stats.RollingAbsMax.
    "},{"location":"releases/0.5.0/#stream","title":"stream","text":"
    • Added stream.iter_libsvm.
    • stream.iter_csv now supports reading from '.zip' files.
    • Added stream.Cache.
    • Added a drop parameter to stream.iter_csv to discard fields.
    "},{"location":"releases/0.5.1/","title":"0.5.1 - 2020-03-29","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.5.1/#compose","title":"compose","text":"
• compose.Pipeline and compose.TransformerUnion now take variadic arguments as input instead of a list. This doesn't change anything when using the shorthand operators | and +.
    "},{"location":"releases/0.5.1/#model_selection","title":"model_selection","text":"
    • Removed model_selection.successive_halving
    • Added model_selection.SuccessiveHalvingRegressor and model_selection.SuccessiveHalvingClassifier
    "},{"location":"releases/0.5.1/#stream","title":"stream","text":"
    • Added a copy parameter to stream.simulate_qa in order to handle unwanted feature modifications.
    "},{"location":"releases/0.5.1/#tree","title":"tree","text":"
    • Added a curtail_under parameter to tree.DecisionTreeClassifier.
• The speed and accuracy of both tree.DecisionTreeClassifier and tree.RandomForestClassifier have been slightly improved for numerical attributes.
    • The esthetics of the tree.DecisionTreeClassifier.draw method have been improved.
    "},{"location":"releases/0.6.0/","title":"0.6.0 - 2020-06-09","text":""},{"location":"releases/0.6.0/#base","title":"base","text":"
• Added a new base class called SupervisedTransformer, from which supervised transformers inherit. Before this, supervised transformers had an is_supervised property.
    "},{"location":"releases/0.6.0/#compose","title":"compose","text":"
    • Added compose.SelectType, which allows selecting feature subsets based on their type.
    • Added a score_one method to compose.Pipeline so that estimators from the anomaly module can be pipelined.
    • Added compose.Grouper, which allows applying transformers within different subgroups.
    "},{"location":"releases/0.6.0/#datasets","title":"datasets","text":"
    • Added datasets.Music, which is a dataset for multi-output binary classification.
• Added datasets.synth.Friedman, which is a synthetic regression dataset.
• The datasets.gen module has been renamed to datasets.synth.
    • Each dataset now has a __repr__ method which displays some descriptive information.
    • Added datasets.Insects, which has 10 variants.
    "},{"location":"releases/0.6.0/#feature_extraction","title":"feature_extraction","text":"
• feature_extraction.Differ has been deprecated. We might put it back in the future if we find a better design.
    "},{"location":"releases/0.6.0/#impute","title":"impute","text":"
    • impute.StatImputer has been completely refactored.
    "},{"location":"releases/0.6.0/#metrics","title":"metrics","text":"
    • In metrics.SMAPE, instead of raising a ZeroDivisionError, the convention is now to use 0 when both y_true and y_pred are equal to 0.
    "},{"location":"releases/0.6.0/#model_selection","title":"model_selection","text":"
    • Added the possibility to configure how the progress is printed in model_selection.progressive_val_score. For instance, the progress can now be printed to a file by providing the file argument.
    "},{"location":"releases/0.6.0/#multiclass","title":"multiclass","text":"
    • Added multiclass.OutputCodeClassifier.
    • Added multiclass.OneVsOneClassifier.
    "},{"location":"releases/0.6.0/#multioutput","title":"multioutput","text":"
    • Fixed a bug where multioutput.ClassifierChain and multioutput.RegressorChain could not be pickled.
    "},{"location":"releases/0.6.0/#stats","title":"stats","text":"
    • Added stats.Shift, which can be used to compute statistics over a shifted version of a variable.
• Added stats.Link, which can be used to compose univariate statistics. Univariate statistics can now be composed via the | operator (see the sketch after this list).
    • Renamed stats.Covariance to stats.Cov.
    • Renamed stats.PearsonCorrelation to stats.PearsonCorr.
    • Renamed stats.AutoCorrelation to stats.AutoCorr.
    • Added stats.RollingCov, which computes covariance between two variables over a window.
    • Added stats.RollingPearsonCorr, which computes the Pearson correlation over a window.
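A minimal sketch of composing statistics with the | operator, assuming stats.Shift pipes its output into the next statistic:

from river import stats

# Mean of the variable, lagged by one time step
stat = stats.Shift(1) | stats.Mean()
for x in [1, 2, 3, 4, 5]:
    stat.update(x)
stat.get()  # under this assumption, the mean of [1, 2, 3, 4]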
    "},{"location":"releases/0.6.0/#stream","title":"stream","text":"
    • Added a stream.iter_sql utility method to work with SQLAlchemy.
    • The target_name parameter of stream.iter_csv has been renamed to target. It can now be passed a list of values in order to support multi-output scenarios.
    • Added stream.iter_arff for handling ARFF files.
    "},{"location":"releases/0.6.0/#tree","title":"tree","text":"
    • Cancelled the behavior where tree.DecisionTreeRegressor would raise an exception when no split was found.
    "},{"location":"releases/0.6.1/","title":"0.6.1 - 2020-06-10","text":""},{"location":"releases/0.6.1/#compose","title":"compose","text":"
    • Fixed a bug that occurred when part of a compose.Transformer was a compose.Pipeline and wasn't properly handled.
    "},{"location":"releases/0.7.0/","title":"0.7.0 - 2021-04-16","text":"

    Alas, no release notes for this one.

    "},{"location":"releases/0.7.1/","title":"0.7.1 - 2021-06-13","text":"

    Fixed an issue where scikit-learn was imported in sam_knn.py but wasn't specified as a dependency.

    "},{"location":"releases/0.7.1/#expert","title":"expert","text":"
    • Each expert model will now raise a NotEnoughModels exception if only a single model is passed.
    "},{"location":"releases/0.7.1/#stream","title":"stream","text":"
    • Added drop_nones parameter to stream.iter_csv.
    "},{"location":"releases/0.8.0/","title":"0.8.0 - 2021-08-31","text":""},{"location":"releases/0.8.0/#base","title":"base","text":"
    • The predict_many and predict_proba_many methods have been removed from base.Classifier. They're part of base.MiniBatchClassifier.
    "},{"location":"releases/0.8.0/#ensemble","title":"ensemble","text":"
    • Implemented ensemble.VotingClassifier.
    • Implemented ensemble.SRPRegressor.
    "},{"location":"releases/0.8.0/#meta","title":"meta","text":"
    • Renamed meta.TransformedTargetRegressor to meta.TargetTransformRegressor.
    • Added meta.TargetStandardScaler.
    "},{"location":"releases/0.8.0/#preprocessing","title":"preprocessing","text":"
    • Added a with_std parameter to StandardScaler.
    "},{"location":"releases/0.8.0/#rules","title":"rules","text":"
    • Added rules.AMRules
    "},{"location":"releases/0.8.0/#stats","title":"stats","text":"
• Made stats.RollingQuantile match the default behavior of NumPy's quantile function.
    "},{"location":"releases/0.8.0/#tree","title":"tree","text":"
• Unified base class structure applied to all tree models.
    • Bug fixes.
    • Added tree.SGTClassifier and tree.SGTRegressor.
    "},{"location":"releases/0.9.0/","title":"0.9.0 - 2021-11-30","text":"
    • Wheels for Python 3.6 have been dropped.
    • Wheels for Python 3.9 have been added.
    "},{"location":"releases/0.9.0/#anomaly","title":"anomaly","text":"
• Moved anomaly.base.AnomalyDetector to anomaly.AnomalyDetector.
    • Implemented anomaly.ConstantThresholder.
    • Implemented anomaly.QuantileThresholder.
• Implemented anomaly.OneClassSVM.
    "},{"location":"releases/0.9.0/#base","title":"base","text":"
    • Renamed base.WrapperMixin to base.Wrapper.
    • Introduced base.WrapperEnsemble.
• Clarified the difference between a base.typing.Dataset and a base.typing.Stream. A Stream is an instance of a Dataset and is stateful, whereas a Dataset is stateless. It's essentially the same as the difference between an Iterable and an Iterator in the Python standard library (see the sketch below).
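A small sketch of the distinction:

from river import datasets

dataset = datasets.Phishing()  # a Dataset: stateless, like an Iterable
stream = iter(dataset)         # a Stream: stateful, like an Iterator
x, y = next(stream)            # consuming the stream advances its state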
    "},{"location":"releases/0.9.0/#compat","title":"compat","text":"
    • Added compat.PyTorch2RiverClassifier
    • Refactored compat.PyTorch2RiverRegressor
    "},{"location":"releases/0.9.0/#compose","title":"compose","text":"
    • You can now use a list as a shorthand to build a TransformerUnion.
    • Fixed a visualization issue when using a pipeline with multiple feature unions.
    • The prejudiced terms blacklist and whitelist have both been renamed to keys.
    • Removed learn_unsupervised parameter from pipeline methods.
    • Implemented compose.TransformerProduct.
    "},{"location":"releases/0.9.0/#datasets","title":"datasets","text":"
    • Added datasets.Keystroke.
    "},{"location":"releases/0.9.0/#ensemble","title":"ensemble","text":"
    • Bug fixes in ensemble.SRPClassifier and ensemble.SRPRegressor.
    • Some estimators have been moved into the ensemble module.
    "},{"location":"releases/0.9.0/#feature_extraction","title":"feature_extraction","text":"
    • Implemented feature_extraction.Lagger.
    • Implemented feature_extraction.TargetLagger.
    "},{"location":"releases/0.9.0/#meta","title":"meta","text":"

    This module has been deleted.

• Moved meta.PredClipper to the preprocessing module.
    • Removed meta.BoxCoxRegressor.
    • Moved meta.TargetTransformRegressor to compose.TargetTransformRegressor.
    • Moved meta.TargetStandardScaler to preprocessing.TargetStandardScaler.
    "},{"location":"releases/0.9.0/#model_selection","title":"model_selection","text":"
    • This new module replaces the expert module.
    • Implemented model_selection.GreedyRegressor.
    • Added ModelSelector base class.
    "},{"location":"releases/0.9.0/#optim","title":"optim","text":"
    • optim.Adam and optim.RMSProp now work with utils.VectorDicts as well as numpy.ndarrays.
    • Added optim.losses.Huber.
    "},{"location":"releases/0.9.0/#preprocessing","title":"preprocessing","text":"
• Enabled preprocessing.OneHotEncoder to one-hot encode values that are lists or sets.
    "},{"location":"releases/0.9.0/#reco","title":"reco","text":"
    • Added a debug_one method to reco.FMRegressor.
    "},{"location":"releases/0.9.0/#selection","title":"selection","text":"
    • This new module replaces the expert module.
    • Implemented selection.GreedyExpertRegressor.
    "},{"location":"releases/0.9.0/#stats","title":"stats","text":"
    • Fixed an issue where some statistics could not be printed if they had not seen any data yet.
    • Implemented median absolute deviation in stats.MAD.
    • The stats.Mean and stats.Var implementations have been made more numerically stable.
    "},{"location":"releases/0.9.0/#time_series","title":"time_series","text":"
    • time_series.Detrender and time_series.GroupDetrender have been removed as they overlap with preprocessing.TargetStandardScaler.
    • Implemented a time_series.evaluate method, which performs progressive validation for time series scenarios.
    • Implemented time_series.HorizonMetric class to evaluate the performance of a forecasting model at each time step along a horizon.
    • Implemented time_series.HoltWinters.
    "},{"location":"releases/0.9.0/#utils","title":"utils","text":"
    • Moved model_selection.expand_param_grid to utils.expand_param_grid.
    • Added utils.poisson.
    • Added the utils.log_method_calls context manager.
    • Added the utils.warm_up_mode context manager.
• Added the utils.pure_inference_mode context manager.
    "},{"location":"releases/unreleased/","title":"Unreleased","text":"

    River's mini-batch methods now support pandas v2. In particular, River conforms to pandas' new sparse API.

    "},{"location":"releases/unreleased/#anomaly","title":"anomaly","text":"
• Added anomaly.LocalOutlierFactor, which is an online version of the LOF algorithm for anomaly detection that matches the scikit-learn implementation.
    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"api/overview/","title":"Overview","text":""},{"location":"api/overview/#active","title":"active","text":"

    Online active learning.

    • EntropySampler
    "},{"location":"api/overview/#base","title":"base","text":"
    • ActiveLearningClassifier
    "},{"location":"api/overview/#anomaly","title":"anomaly","text":"

    Anomaly detection.

    Estimators in the anomaly module have a bespoke API. Each anomaly detector has a score_one method instead of a predict_one method. This method returns an anomaly score. Normal observations should have a low score, whereas anomalous observations should have a high score. The range of the scores is relative to each estimator.

Anomaly detectors are usually unsupervised, in that they analyze the distribution of the features they are shown. But River also has a notion of supervised anomaly detectors. These analyze the distribution of a target variable, and optionally include the distribution of the features as well. They are useful for detecting labelling anomalies, which can be detrimental if they are learned by a model.
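A minimal sketch of this API, with made-up feature values:

from river import anomaly

detector = anomaly.HalfSpaceTrees(seed=42)
for v in [0.5, 0.45, 0.43, 0.44, 0.9, 0.5]:
    x = {"value": v}
    score = detector.score_one(x)  # higher means more anomalous
    detector.learn_one(x)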

    • GaussianScorer
    • HalfSpaceTrees
    • LocalOutlierFactor
    • OneClassSVM
    • QuantileFilter
    • ThresholdFilter
    "},{"location":"api/overview/#base_1","title":"base","text":"
    • AnomalyDetector
    • AnomalyFilter
    • SupervisedAnomalyDetector
    "},{"location":"api/overview/#bandit","title":"bandit","text":"

    Multi-armed bandit (MAB) policies.

    The bandit policies in River have a generic API. This allows them to be used in a variety of situations. For instance, they can be used for model selection (see model_selection.BanditRegressor).
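A minimal sketch of the generic API, assuming the single-arm pull described in the 0.17.0 notes and an update(arm, reward) signature:

from river import bandit

policy = bandit.EpsilonGreedy(epsilon=0.1, seed=42)
arm = policy.pull(range(3))  # pick one arm among three
policy.update(arm, 1.0)      # feed back the observed reward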

    Classes

    • BayesUCB
    • EpsilonGreedy
    • Exp3
    • LinUCBDisjoint
    • RandomPolicy
    • ThompsonSampling
    • UCB

    Functions

    • evaluate
    • evaluate_offline
    "},{"location":"api/overview/#base_2","title":"base","text":"
    • ContextualPolicy
    • Policy
    "},{"location":"api/overview/#datasets","title":"datasets","text":"
    • BanditDataset
    • NewsArticles
    "},{"location":"api/overview/#envs","title":"envs","text":"
    • CandyCaneContest
    • KArmedTestbed
    "},{"location":"api/overview/#base_3","title":"base","text":"

    Base interfaces.

    Every estimator in River is a class, and as such inherits from at least one base interface. These are used to categorize, organize, and standardize the many estimators that River contains.

    This module contains mixin classes, which are all suffixed by Mixin. Their purpose is to provide additional functionality to an estimator, and thus need to be used in conjunction with a non-mixin base class.

    This module also contains utilities for type hinting and tagging estimators.
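For instance, here is a sketch of a toy estimator built on the base.Classifier interface (MajorityClassClassifier is a made-up name for illustration):

from river import base

class MajorityClassClassifier(base.Classifier):
    """Predicts the most frequent label seen so far."""

    def __init__(self):
        self.counts = {}

    def learn_one(self, x, y):
        self.counts[y] = self.counts.get(y, 0) + 1
        return self

    def predict_proba_one(self, x):
        total = sum(self.counts.values()) or 1
        return {label: n / total for label, n in self.counts.items()}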

    • Base
    • BinaryDriftAndWarningDetector
    • BinaryDriftDetector
    • Classifier
    • Clusterer
    • DriftAndWarningDetector
    • DriftDetector
    • Ensemble
    • Estimator
    • MiniBatchClassifier
    • MiniBatchRegressor
    • MiniBatchSupervisedTransformer
    • MiniBatchTransformer
    • MultiLabelClassifier
    • MultiTargetRegressor
    • Regressor
    • SupervisedTransformer
    • Transformer
    • Wrapper
    • WrapperEnsemble
    "},{"location":"api/overview/#cluster","title":"cluster","text":"

    Unsupervised clustering.

    • CluStream
    • DBSTREAM
    • DenStream
    • KMeans
    • STREAMKMeans
    • TextClust
    "},{"location":"api/overview/#compat","title":"compat","text":"

    Compatibility tools.

    This module contains adapters for making River estimators compatible with other libraries, and vice-versa whenever possible. The relevant adapters will only be usable if you have installed the necessary library. For instance, you have to install scikit-learn in order to use the compat.convert_sklearn_to_river function.
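For instance, a scikit-learn regressor can be wrapped so that it exposes River's learn_one/predict_one interface (a sketch, assuming scikit-learn is installed):

from sklearn import linear_model as sk_linear_model
from river import compat

model = compat.convert_sklearn_to_river(sk_linear_model.SGDRegressor())
model.learn_one({"x": 1.0}, 2.0)  # partial_fit is called under the hood
model.predict_one({"x": 1.0})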

    Classes

    • River2SKLClassifier
    • River2SKLClusterer
    • River2SKLRegressor
    • River2SKLTransformer
    • SKL2RiverClassifier
    • SKL2RiverRegressor

    Functions

    • convert_river_to_sklearn
    • convert_sklearn_to_river
    "},{"location":"api/overview/#compose","title":"compose","text":"

    Model composition.

    This module contains utilities for merging multiple modeling steps into a single pipeline. Although pipelines are not the only way to process a stream of data, we highly encourage you to use them.
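A minimal sketch of building a pipeline, with made-up feature names:

from river import compose, linear_model, preprocessing

model = (
    compose.Select("humidity", "temperature") |
    preprocessing.StandardScaler() |
    linear_model.LogisticRegression()
)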

    Classes

    • Discard
    • FuncTransformer
    • Grouper
    • Pipeline
    • Prefixer
    • Renamer
    • Select
    • SelectType
    • Suffixer
    • TargetTransformRegressor
    • TransformerProduct
    • TransformerUnion

    Functions

    • learn_during_predict
    "},{"location":"api/overview/#conf","title":"conf","text":"

Conformal predictions. This module contains wrappers to enable conformal predictions on any regressor or classifier.

    • Interval
    • RegressionJackknife
    "},{"location":"api/overview/#covariance","title":"covariance","text":"

    Online estimation of covariance and precision matrices.

    • EmpiricalCovariance
    • EmpiricalPrecision
    "},{"location":"api/overview/#datasets_1","title":"datasets","text":"

    Datasets.

This module contains a collection of datasets for multiple tasks: classification, regression, etc. These are popular datasets, conveniently wrapped so that the data can easily be iterated over in a stream fashion. All datasets have a fixed size. Please refer to river.synth if you are interested in infinite synthetic data generators.

    Regression

Name               Samples    Features
AirlinePassengers  144        1
Bikes              182,470    8
ChickWeights       578        3
MovieLens100K      100,000    10
Restaurants        252,108    7
Taxis              1,458,644  8
TrumpApproval      1,001      6
WaterFlow          1,268      1

    Binary classification

Name          Samples     Features   Sparse
Bananas       5,300       2
CreditCard    284,807     30
Elec2         45,312      8
Higgs         11,000,000  28
HTTP          567,498     3
MaliciousURL  2,396,130   3,231,961  ✔️
Phishing      1,250       9
SMSSpam       5,574       1
SMTP          95,156      3
TREC07        75,419      5

    Multi-class classification

Name           Samples  Features  Classes
ImageSegments  2,310    18        7
Insects        52,848   33        6
Keystroke      20,400   31        51

    Multi-output binary classification

Name   Samples  Features  Outputs
Music  593      72        6

    Multi-output regression

Name        Samples  Features  Outputs
SolarFlare  1,066    10        3
"},{"location":"api/overview/#base_4","title":"base","text":"
    • Dataset
    • FileDataset
    • RemoteDataset
    • SyntheticDataset
    "},{"location":"api/overview/#synth","title":"synth","text":"

    Synthetic datasets.

Each synthetic dataset is a stream generator. The benefit of using a generator is that the data is not stored; each sample is generated on the fly. Except for a couple of them, the majority of these generators produce an infinite amount of data.

    Binary classification

Name                Features
Agrawal             9
AnomalySine         2
ConceptDriftStream  9
Hyperplane          10
Mixed               4
SEA                 3
Sine                2
STAGGER             3

    Regression

Name           Features
Friedman       10
FriedmanDrift  10
Mv             10
Planes2D       10

    Multi-class classification

Name            Features  Classes
LED             7         10
LEDDrift        7         10
RandomRBF       10        2
RandomRBFDrift  10        2
RandomTree      10        2
Waveform        21        3

    Multi-output binary classification

Name     Features  Outputs
Logical  2         3
"},{"location":"api/overview/#drift","title":"drift","text":"

    Concept Drift Detection.

This module contains concept drift detection methods. The purpose of a drift detector is to raise an alarm if the data distribution changes. A good drift detector maximizes the number of true positives while keeping the number of false positives to a minimum.
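A minimal sketch, using a made-up stream with an abrupt change:

from river import drift

detector = drift.ADWIN()
data = [1.0] * 500 + [5.0] * 500  # abrupt shift halfway through
for i, x in enumerate(data):
    detector.update(x)
    if detector.drift_detected:
        print(f"change detected at index {i}")
        break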

    • ADWIN
    • DriftRetrainingClassifier
    • DummyDriftDetector
    • KSWIN
    • PageHinkley
    "},{"location":"api/overview/#binary","title":"binary","text":"

    Drift detection for binary data.

    • DDM
    • EDDM
    • HDDM_A
    • HDDM_W
    "},{"location":"api/overview/#datasets_2","title":"datasets","text":"
    • AirlinePassengers
    • Apple
    • Bitcoin
    • BrentSpotPrice
    • Occupancy
    • RunLog
    • UKCoalEmploy
    "},{"location":"api/overview/#dummy","title":"dummy","text":"

    Dummy estimators.

    This module is here for testing purposes, as well as providing baseline performances.

    • NoChangeClassifier
    • PriorClassifier
    • StatisticRegressor
    "},{"location":"api/overview/#ensemble","title":"ensemble","text":"

    Ensemble learning.

    Broadly speaking, there are two kinds of ensemble approaches. There are those that copy a single model several times and aggregate the predictions of said copies. This includes bagging as well as boosting. Then there are those that are composed of an arbitrary list of models, and can therefore aggregate predictions from different kinds of models.

    • ADWINBaggingClassifier
    • ADWINBoostingClassifier
    • AdaBoostClassifier
    • BOLEClassifier
    • BaggingClassifier
    • BaggingRegressor
    • EWARegressor
    • LeveragingBaggingClassifier
    • SRPClassifier
    • SRPRegressor
    • StackingClassifier
    • VotingClassifier
    "},{"location":"api/overview/#evaluate","title":"evaluate","text":"

    Model evaluation.

This module provides utilities to evaluate an online model. The goal is to reproduce a real-world scenario with high fidelity. The core function of this module is progressive_val_score, which evaluates a model via progressive validation.

This module also exposes "tracks". A track is a predefined combination of a dataset and one or more metrics. This provides a principled manner of comparing models with each other. For instance, the RegressionTrack contains several datasets and metrics to evaluate regression models. There is also a bare Track class for implementing a custom track. The benchmarks directory at the root of the River repository uses these tracks.
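A minimal sketch of progressive validation:

from river import datasets, evaluate, linear_model, metrics, preprocessing

model = preprocessing.StandardScaler() | linear_model.LogisticRegression()
evaluate.progressive_val_score(
    dataset=datasets.Phishing(),
    model=model,
    metric=metrics.ROCAUC(),
)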

    Classes

    • BinaryClassificationTrack
    • MultiClassClassificationTrack
    • RegressionTrack
    • Track

    Functions

    • iter_progressive_val_score
    • progressive_val_score
    "},{"location":"api/overview/#facto","title":"facto","text":"

    Factorization machines.

    • FFMClassifier
    • FFMRegressor
    • FMClassifier
    • FMRegressor
    • FwFMClassifier
    • FwFMRegressor
    • HOFMClassifier
    • HOFMRegressor
    "},{"location":"api/overview/#feature_extraction","title":"feature_extraction","text":"

    Feature extraction.

    This module can be used to extract information from raw features. This includes encoding categorical data as well as looking at interactions between existing features. This differs from the preprocessing module, in that the latter's purpose is rather to clean the data so that it may be processed by a particular machine learning algorithm.

    • Agg
    • BagOfWords
    • PolynomialExtender
    • RBFSampler
    • TFIDF
    • TargetAgg
    "},{"location":"api/overview/#feature_selection","title":"feature_selection","text":"

    Feature selection.

    • PoissonInclusion
    • SelectKBest
    • VarianceThreshold
    "},{"location":"api/overview/#forest","title":"forest","text":"

    This module implements forest-based classifiers and regressors.

    • AMFClassifier
    • AMFRegressor
    • ARFClassifier
    • ARFRegressor
    • OXTRegressor
    "},{"location":"api/overview/#imblearn","title":"imblearn","text":"

    Sampling methods.

    • ChebyshevOverSampler
    • ChebyshevUnderSampler
    • HardSamplingClassifier
    • HardSamplingRegressor
    • RandomOverSampler
    • RandomSampler
    • RandomUnderSampler
    "},{"location":"api/overview/#linear_model","title":"linear_model","text":"

    Linear models.

    • ALMAClassifier
    • BayesianLinearRegression
    • LinearRegression
    • LogisticRegression
    • PAClassifier
    • PARegressor
    • Perceptron
    • SoftmaxRegression
    "},{"location":"api/overview/#base_5","title":"base","text":"
    • GLM
    "},{"location":"api/overview/#metrics","title":"metrics","text":"

    Evaluation metrics.

    All the metrics are updated one sample at a time. This way we can track performance of predictive methods over time.

Note that all metrics have a revert method, enabling them to be wrapped in utils.Rolling. This allows computing rolling metrics:

from river import metrics, utils

y_true = [True, False, True, True]
y_pred = [False, False, True, True]

metric = utils.Rolling(metrics.Accuracy(), window_size=3)

for yt, yp in zip(y_true, y_pred):
    print(metric.update(yt, yp))
Accuracy: 0.00%
Accuracy: 50.00%
Accuracy: 66.67%
Accuracy: 100.00%
    • Accuracy
    • AdjustedMutualInfo
    • AdjustedRand
    • BalancedAccuracy
    • ClassificationReport
    • CohenKappa
    • Completeness
    • ConfusionMatrix
    • CrossEntropy
    • F1
    • FBeta
    • FowlkesMallows
    • GeometricMean
    • Homogeneity
    • Jaccard
    • LogLoss
    • MAE
    • MAPE
    • MCC
    • MSE
    • MacroF1
    • MacroFBeta
    • MacroJaccard
    • MacroPrecision
    • MacroRecall
    • MicroF1
    • MicroFBeta
    • MicroJaccard
    • MicroPrecision
    • MicroRecall
    • MultiFBeta
    • MutualInfo
    • NormalizedMutualInfo
    • Precision
    • R2
    • RMSE
    • RMSLE
    • ROCAUC
    • Rand
    • Recall
    • RollingROCAUC
    • SMAPE
    • Silhouette
    • VBeta
    • WeightedF1
    • WeightedFBeta
    • WeightedJaccard
    • WeightedPrecision
    • WeightedRecall
    "},{"location":"api/overview/#base_6","title":"base","text":"
    • BinaryMetric
    • ClassificationMetric
    • Metric
    • Metrics
    • MultiClassMetric
    • RegressionMetric
    • WrapperMetric
    "},{"location":"api/overview/#multioutput","title":"multioutput","text":"

    Metrics for multi-output learning.

    • ExactMatch
    • MacroAverage
    • MicroAverage
    • MultiLabelConfusionMatrix
    • PerOutput
    • SampleAverage
    "},{"location":"api/overview/#base_7","title":"base","text":"
    • MultiOutputClassificationMetric
    • MultiOutputRegressionMetric
    "},{"location":"api/overview/#misc","title":"misc","text":"

    Miscellaneous.

    This module essentially regroups some implementations that have nowhere else to go.

    • SDFT
    • Skyline
    "},{"location":"api/overview/#model_selection","title":"model_selection","text":"

    Model selection.

This module regroups a variety of methods that may be used for performing model selection. A model selector is provided with a list of models. These are called "experts" in the expert learning literature. The model selector's goal is to perform at least as well as the best model. Indeed, the best model is initially unknown; the performance of each model becomes more apparent as time goes by. Different strategies are possible, each one offering a different tradeoff in terms of accuracy and computational performance.

    Model selection can be used for tuning the hyperparameters of a model. This may be done by creating a copy of the model for each set of hyperparameters, and treating each copy as a separate model. The utils.expand_param_grid function can be used for this purpose.
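A rough sketch of this workflow; the grid format shown here (an optimizer class paired with a sub-grid of learning rates) should be treated as an assumption:

from river import linear_model, model_selection, optim, utils

models = utils.expand_param_grid(
    linear_model.LinearRegression(),
    {"optimizer": [(optim.SGD, {"lr": [0.1, 0.01, 0.005]})]},
)
selector = model_selection.GreedyRegressor(models)
selector.learn_one({"x": 1.0}, 2.0)
selector.predict_one({"x": 1.0})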

    • BanditClassifier
    • BanditRegressor
    • GreedyRegressor
    • SuccessiveHalvingClassifier
    • SuccessiveHalvingRegressor
    "},{"location":"api/overview/#base_8","title":"base","text":"
    • ModelSelectionClassifier
    • ModelSelectionRegressor
    "},{"location":"api/overview/#multiclass","title":"multiclass","text":"

    Multi-class classification.

    • OneVsOneClassifier
    • OneVsRestClassifier
    • OutputCodeClassifier
    "},{"location":"api/overview/#multioutput_1","title":"multioutput","text":"

    Multi-output models.

    • ClassifierChain
    • MonteCarloClassifierChain
    • MultiClassEncoder
    • ProbabilisticClassifierChain
    • RegressorChain
    "},{"location":"api/overview/#naive_bayes","title":"naive_bayes","text":"

    Naive Bayes algorithms.

    • BernoulliNB
    • ComplementNB
    • GaussianNB
    • MultinomialNB
    "},{"location":"api/overview/#neighbors","title":"neighbors","text":"

    Neighbors-based learning.

    Also known as lazy methods. In these methods, generalisation of the training data is delayed until a query is received.

    • KNNClassifier
    • KNNRegressor
    • LazySearch
    • SWINN
    "},{"location":"api/overview/#neural_net","title":"neural_net","text":"

    Neural networks.

    • MLPRegressor
    "},{"location":"api/overview/#activations","title":"activations","text":"
    • Identity
    • ReLU
    • Sigmoid
    "},{"location":"api/overview/#optim","title":"optim","text":"

    Stochastic optimization.

    • AMSGrad
    • AdaBound
    • AdaDelta
    • AdaGrad
    • AdaMax
    • Adam
    • Averager
    • FTRLProximal
    • Momentum
    • Nadam
    • NesterovMomentum
    • RMSProp
    • SGD
    "},{"location":"api/overview/#base_9","title":"base","text":"
    • Initializer
    • Loss
    • Optimizer
    • Scheduler
    "},{"location":"api/overview/#initializers","title":"initializers","text":"

    Weight initializers.

    • Constant
    • Normal
    • Zeros
    "},{"location":"api/overview/#losses","title":"losses","text":"

    Loss functions.

Each loss function is intended to work with both single values and numpy vectors.
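A minimal sketch, assuming losses are callable with (y_true, y_pred):

from river import optim

loss = optim.losses.Squared()
loss(y_true=1.0, y_pred=0.5)           # evaluate the loss
loss.gradient(y_true=1.0, y_pred=0.5)  # its gradient w.r.t. the prediction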

    • Absolute
    • BinaryFocalLoss
    • BinaryLoss
    • Cauchy
    • CrossEntropy
    • EpsilonInsensitiveHinge
    • Hinge
    • Huber
    • Log
    • MultiClassLoss
    • Poisson
    • Quantile
    • RegressionLoss
    • Squared
    "},{"location":"api/overview/#schedulers","title":"schedulers","text":"

    Learning rate schedulers.

    • Constant
    • InverseScaling
    • Optimal
    "},{"location":"api/overview/#preprocessing","title":"preprocessing","text":"

    Feature preprocessing.

The purpose of this module is to modify an existing set of features so that they can be processed by a machine learning algorithm. This may be done by scaling numeric parts of the data or by one-hot encoding categorical features. The difference with the feature_extraction module is that the latter extracts new information from the data.

    • AdaptiveStandardScaler
    • Binarizer
    • FeatureHasher
    • GaussianRandomProjector
    • LDA
    • MaxAbsScaler
    • MinMaxScaler
    • Normalizer
    • OneHotEncoder
    • OrdinalEncoder
    • PredClipper
    • PreviousImputer
    • RobustScaler
    • SparseRandomProjector
    • StandardScaler
    • StatImputer
    • TargetMinMaxScaler
    • TargetStandardScaler
    "},{"location":"api/overview/#proba","title":"proba","text":"

    Probability distributions.

    • Beta
    • Gaussian
    • Multinomial
    • MultivariateGaussian
    "},{"location":"api/overview/#base_10","title":"base","text":"
    • BinaryDistribution
    • ContinuousDistribution
    • DiscreteDistribution
    • Distribution
    "},{"location":"api/overview/#reco","title":"reco","text":"

    Recommender systems module.

Recommender systems (recsys for short) are a large topic. This module is far from comprehensive. It simply provides models which can contribute towards building a recommender system.

    A typical recommender system is made up of a retrieval phase, followed by a ranking phase. The output of the retrieval phase is a shortlist of the catalogue of items. The items in the shortlist are then usually ranked according to the expected preference the user will have for each item. This module focuses on the ranking phase.

Models which inherit from the Ranker class have a rank method. This allows sorting a set of items for a given user. Each model also has a learn_one(user, item, y, context) method which allows learning user preferences. The y parameter is a reward value, the nature of which is specific to each recommendation task. Typically, the reward is a number or a boolean value. It is up to the user to determine how to translate a user session into training data.
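A minimal sketch, with made-up users and items, and assuming a rank(user, items) signature as described above:

from river import reco

model = reco.Baseline()
model.learn_one(user="Alice", item="wine", y=4)
model.learn_one(user="Alice", item="beer", y=2)
model.rank(user="Alice", items=["wine", "beer", "cheese"])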

    • Baseline
    • BiasedMF
    • FunkMF
    • RandomNormal
    "},{"location":"api/overview/#base_11","title":"base","text":"
    • Ranker
    "},{"location":"api/overview/#rules","title":"rules","text":"

    Decision rules-based algorithms.

    • AMRules
    "},{"location":"api/overview/#sketch","title":"sketch","text":"

    Data containers and collections for sequential data.

    This module has summary and sketch structures that operate with constrained amounts of memory and processing time.

    • Counter
    • HeavyHitters
    • Histogram
    • Set
    "},{"location":"api/overview/#stats","title":"stats","text":"

Running statistics.

    • AbsMax
    • AutoCorr
    • BayesianMean
    • Count
    • Cov
    • EWMean
    • EWVar
    • Entropy
    • IQR
    • Kurtosis
    • Link
    • MAD
    • Max
    • Mean
    • Min
    • Mode
    • NUnique
    • PeakToPeak
    • PearsonCorr
    • Quantile
    • RollingAbsMax
    • RollingIQR
    • RollingMax
    • RollingMin
    • RollingMode
    • RollingPeakToPeak
    • RollingQuantile
    • SEM
    • Shift
    • Skew
    • Sum
    • Var
    "},{"location":"api/overview/#base_12","title":"base","text":"
    • Bivariate
    • Univariate
    "},{"location":"api/overview/#stream","title":"stream","text":"

    Streaming utilities.

    The module includes tools to iterate over data streams.
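For instance, a CSV file can be streamed row by row (a sketch; data.csv and its y column are hypothetical):

from river import stream

# Each row becomes a (features dict, target) pair.
for x, y in stream.iter_csv("data.csv", target="y", converters={"y": float}):
    print(x, y)
    break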

    Classes

    • Cache
    • TwitchChatStream
    • TwitterLiveStream

    Functions

    • iter_arff
    • iter_array
    • iter_csv
    • iter_libsvm
    • iter_pandas
    • iter_sklearn_dataset
    • iter_sql
    • shuffle
    • simulate_qa
    "},{"location":"api/overview/#time_series","title":"time_series","text":"

    Time series forecasting.

    Classes

    • ForecastingMetric
    • HoltWinters
    • HorizonAggMetric
    • HorizonMetric
    • SNARIMAX

    Functions

    • evaluate
    • iter_evaluate
    "},{"location":"api/overview/#base_13","title":"base","text":"
    • Forecaster
    "},{"location":"api/overview/#tree","title":"tree","text":"

    This module implements incremental Decision Tree (iDT) algorithms for handling classification and regression tasks.

    Each family of iDT will be presented in a dedicated section.

At any moment, an iDT might face situations where an input feature previously used to make a split decision is missing in an incoming sample. In this case, the most traversed path is selected to pass the instance down. Moreover, in the case of nominal features, if a new category arises and the feature is used in a decision node, a new branch is created to accommodate the new value.

    1. Hoeffding Trees

This family of iDT algorithms uses the Hoeffding bound to determine whether or not the incrementally computed best split candidates would be equivalent to the ones obtained in a batch-processing fashion.

All the available Hoeffding Tree (HT) implementations share some common functionalities (a minimal usage sketch follows the list):

    • Set the maximum tree depth allowed (max_depth).

    • Handle Active and Inactive nodes: Active learning nodes update their own internal state to improve predictions and monitor input features to perform split attempts. Inactive learning nodes do not update their internal state and only keep the predictors; they are used to save memory in the tree (max_size).

    • Enable/disable memory management.

    • Define strategies to sort leaves according to how likely they are going to be split. This enables deactivating non-promising leaves to save memory.

• Disabling 'poor' attributes to save memory and speed up tree construction. A poor attribute is an input feature whose split merit is much smaller than the current best candidate's. Once a feature is disabled, the tree stops saving the statistics necessary to split on that feature.

    • Define properties to access leaf prediction strategies, split criteria, and other relevant characteristics.
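A minimal usage sketch of a Hoeffding Tree:

from river import datasets, tree

model = tree.HoeffdingTreeClassifier(grace_period=50, max_depth=5)
for x, y in datasets.Phishing():
    y_pred = model.predict_one(x)  # test-then-train
    model.learn_one(x, y)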

    2. Stochastic Gradient Trees

Stochastic Gradient Trees (SGT) directly optimize a loss function, rather than relying on split heuristics to guide tree growth. F-tests are performed to decide whether a leaf should be expanded or its prediction value updated.

    SGTs can deal with binary classification and single-target regression. They also support dynamic and static feature quantizers to deal with numerical inputs.

    • ExtremelyFastDecisionTreeClassifier
    • HoeffdingAdaptiveTreeClassifier
    • HoeffdingAdaptiveTreeRegressor
    • HoeffdingTreeClassifier
    • HoeffdingTreeRegressor
    • SGTClassifier
    • SGTRegressor
    • iSOUPTreeRegressor
    "},{"location":"api/overview/#base_14","title":"base","text":"

This module defines generic branch and leaf implementations. These should be used by each tree-based model in River. Using these classes makes the code more DRY. The only reason for not doing so would be performance, whereby a tree-based model uses a bespoke implementation.

    This module defines a bunch of methods to ease the manipulation and diagnostic of trees. Its intention is to provide utilities for walking over a tree and visualizing it.

    • Branch
    • Leaf
    "},{"location":"api/overview/#splitter","title":"splitter","text":"

This module implements the Attribute Observers (AO) (or tree splitters) that are used by the Hoeffding Trees (HT). It also implements the feature quantizers (FQ) used by Stochastic Gradient Trees (SGT). AOs are a core aspect of HT construction, and might represent one of the major bottlenecks when building the trees. The same holds for SGTs and FQs. The correct choice and setup of a splitter might result in significant differences in the running time and memory usage of the incremental decision trees.

AOs for classification and regression trees can be differentiated by using the is_target_class property (True for splitters designed for classification tasks). An error will be raised if one tries to use a classification splitter in a regression tree, and vice-versa. Lastly, AOs cannot be used in SGTs and FQs cannot be used in Hoeffding Trees, so care must be taken when choosing the correct feature splitter.
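A minimal sketch of swapping the splitter of a Hoeffding Tree:

from river import tree

# Explicitly choose a regression splitter (TEBST = truncated E-BST).
model = tree.HoeffdingTreeRegressor(
    splitter=tree.splitter.TEBSTSplitter(),
)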

    • DynamicQuantizer
    • EBSTSplitter
    • ExhaustiveSplitter
    • GaussianSplitter
    • HistogramSplitter
    • QOSplitter
    • Quantizer
    • Splitter
    • StaticQuantizer
    • TEBSTSplitter
    "},{"location":"api/overview/#utils","title":"utils","text":"

Shared utility classes and functions.

    Classes

    • Rolling
    • SortedWindow
    • TimeRolling
    • VectorDict

    Functions

    • dict2numpy
    • expand_param_grid
    • log_method_calls
    • numpy2dict
    "},{"location":"api/overview/#math","title":"math","text":"

    Mathematical utility functions (intended for internal purposes).

    A lot of this is experimental and has a high probability of changing in the future.

    • argmax
    • chain_dot
    • clamp
    • dot
    • dotvecmat
    • log_sum_2_exp
    • matmul2d
    • minkowski_distance
    • norm
    • outer
    • prod
    • sherman_morrison
    • sigmoid
    • sign
    • softmax
    • woodbury_matrix
    "},{"location":"api/overview/#norm","title":"norm","text":"
    • normalize_values_in_dict
    • scale_values_in_dict
    "},{"location":"api/overview/#pretty","title":"pretty","text":"

    Helper functions for making things readable by humans.

    • humanize_bytes
    • print_table
    "},{"location":"api/overview/#random","title":"random","text":"
    • exponential
    • poisson
    "},{"location":"api/active/EntropySampler/","title":"EntropySampler","text":"

    Active learning classifier based on entropy measures.

    The entropy sampler selects samples for labeling based on the entropy of the prediction. The higher the entropy, the more likely the sample will be selected for labeling. The entropy measure is normalized to [0, 1] and then raised to the power of the discount factor.
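    Read literally, this means the probability of requesting a label is presumably

    \[p_{ask} = \left(\frac{H(p)}{\log K}\right)^{d}, \qquad H(p) = -\sum_{k} p_k \log p_k\]

    where \(p\) holds the predicted probabilities, \(K\) is the number of observed classes, and \(d\) is the discount factor. This is an interpretation of the description above rather than a statement about the exact implementation; it is at least consistent with the parameter notes below, since \(d = 0\) always requests a label and larger values of \(d\) shrink the probability.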

    "},{"location":"api/active/EntropySampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      The classifier to wrap.

    • discount_factor

      Type \u2192 float

      Default \u2192 3

      The discount factor to apply to the entropy measure. A value of 1 won't affect the entropy. The higher the discount factor, the more the entropy will be discounted, and the less likely samples will be selected for labeling. A value of 0 will select all samples for labeling. The discount factor is thus a way to control how many samples are selected for labeling.

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/active/EntropySampler/#examples","title":"Examples","text":"

    from river import active\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.SMSSpam()\nmetric = metrics.Accuracy()\nmodel = (\n    feature_extraction.TFIDF(on='body') |\n    linear_model.LogisticRegression()\n)\nmodel = active.EntropySampler(model, seed=42)\n\nn_samples_used = 0\nfor x, y in dataset:\n    y_pred, ask = model.predict_one(x)\n    metric = metric.update(y, y_pred)\n    if ask:\n        n_samples_used += 1\n        model = model.learn_one(x, y)\n\nmetric\n
    Accuracy: 86.60%\n

    dataset.n_samples, n_samples_used\n
    (5574, 1921)\n

    print(f\"{n_samples_used / dataset.n_samples:.2%}\")\n
    34.46%\n

    "},{"location":"api/active/EntropySampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label, along with a boolean indicating whether a label is requested.

    predict_proba_one

    Predict the probability of each label for x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/active/base/ActiveLearningClassifier/","title":"ActiveLearningClassifier","text":"

    Base class for active learning classifiers.

    "},{"location":"api/active/base/ActiveLearningClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      The classifier to wrap.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/active/base/ActiveLearningClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label, along with a boolean indicating whether a label is requested.

    predict_proba_one

    Predict the probability of each label for x and indicate whether a label is needed.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/anomaly/GaussianScorer/","title":"GaussianScorer","text":"

    Univariate Gaussian anomaly detector.

    This is a supervised anomaly detector. It fits a Gaussian distribution to the target values. The anomaly score is then computed as follows:

    \[score = 2 \times \lvert CDF(y) - 0.5 \rvert\]

    This makes it so that the anomaly score is between 0 and 1.

    "},{"location":"api/anomaly/GaussianScorer/#parameters","title":"Parameters","text":"
    • window_size

      Default \u2192 None

      Set this to fit the Gaussian distribution over a window of recent values.

    • grace_period

      Default \u2192 100

    Number of samples before which a 0 is always returned. This is handy because the Gaussian distribution needs time to stabilize and will likely produce overly high anomaly scores for the first samples.

    "},{"location":"api/anomaly/GaussianScorer/#examples","title":"Examples","text":"

    import random\nfrom river import anomaly\n\nrng = random.Random(42)\ndetector = anomaly.GaussianScorer()\n\nfor y in (rng.gauss(0, 1) for _ in range(100)):\n    detector = detector.learn_one(None, y)\n\ndetector.score_one(None, -3)\n
    0.999477...\n

    detector.score_one(None, 3)\n
    0.999153...\n

    detector.score_one(None, 0)\n
    0.052665...\n

    detector.score_one(None, 0.5)\n
    0.383717...\n

    "},{"location":"api/anomaly/GaussianScorer/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedAnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/HalfSpaceTrees/","title":"HalfSpaceTrees","text":"

    Half-Space Trees (HST).

    Half-space trees are an online variant of isolation forests. They work well when anomalies are spread out. However, they do not work well if anomalies are packed together in windows.

    By default, this implementation assumes that each feature's values lie between 0 and 1. If this isn't the case, you can manually specify the limits via the limits argument. If you do not know the limits in advance, you can use a preprocessing.MinMaxScaler as an initial preprocessing step.

    The current implementation builds the trees the first time the learn_one method is called. Therefore, the first learn_one call might be slow, whereas subsequent calls will be very fast in comparison. In general, the computation time of both learn_one and score_one scales linearly with the number of trees, and exponentially with the height of each tree.

    Note that high scores indicate anomalies, whereas low scores indicate normal observations.

    "},{"location":"api/anomaly/HalfSpaceTrees/#parameters","title":"Parameters","text":"
    • n_trees

      Default \u2192 10

      Number of trees to use.

    • height

      Default \u2192 8

    Height of each tree. Note that a tree of height h is made up of h + 1 levels and therefore contains 2 ** (h + 1) - 1 nodes (e.g. the default height of 8 yields 2 ** 9 - 1 = 511 nodes per tree).

    • window_size

      Default \u2192 250

      Number of observations to use for calculating the mass at each node in each tree.

    • limits

      Type \u2192 dict[base.typing.FeatureName, tuple[float, float]] | None

      Default \u2192 None

      Specifies the range of each feature. By default each feature is assumed to be in range [0, 1].

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number seed.

    "},{"location":"api/anomaly/HalfSpaceTrees/#attributes","title":"Attributes","text":"
    • size_limit

    This is the threshold under which the node search stops during the scoring phase. The value 0.1 is a magic constant from the original paper.

    "},{"location":"api/anomaly/HalfSpaceTrees/#examples","title":"Examples","text":"

    from river import anomaly\n\nX = [0.5, 0.45, 0.43, 0.44, 0.445, 0.45, 0.0]\nhst = anomaly.HalfSpaceTrees(\n    n_trees=5,\n    height=3,\n    window_size=3,\n    seed=42\n)\n\nfor x in X[:3]:\n    hst = hst.learn_one({'x': x})  # Warming up\n\nfor x in X:\n    features = {'x': x}\n    hst = hst.learn_one(features)\n    print(f'Anomaly score for x={x:.3f}: {hst.score_one(features):.3f}')\n
    Anomaly score for x=0.500: 0.107\nAnomaly score for x=0.450: 0.071\nAnomaly score for x=0.430: 0.107\nAnomaly score for x=0.440: 0.107\nAnomaly score for x=0.445: 0.107\nAnomaly score for x=0.450: 0.071\nAnomaly score for x=0.000: 0.853\n

    The feature values all lie between 0 and 1, which is what the model assumes by default. In the following example, we construct a pipeline that scales the data online and ensures that each feature's values lie between 0 and 1.

    from river import compose\nfrom river import datasets\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.MinMaxScaler(),\n    anomaly.HalfSpaceTrees(seed=42)\n)\n\nauc = metrics.ROCAUC()\n\nfor x, y in datasets.CreditCard().take(2500):\n    score = model.score_one(x)\n    model = model.learn_one(x)\n    auc = auc.update(y, score)\n\nauc\n
    ROCAUC: 91.15%\n

    You can also use the evaluate.progressive_val_score function to evaluate the model on a data stream.

    from river import evaluate\n\nmodel = model.clone()\n\nevaluate.progressive_val_score(\n    dataset=datasets.CreditCard().take(2500),\n    model=model,\n    metric=metrics.ROCAUC(),\n    print_every=1000\n)\n
    [1,000] ROCAUC: 88.43%\n[2,000] ROCAUC: 89.28%\n[2,500] ROCAUC: 91.15%\nROCAUC: 91.15%\n

    "},{"location":"api/anomaly/HalfSpaceTrees/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x \u2014 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    1. Tan, S.C., Ting, K.M. and Liu, T.F., 2011, June. Fast anomaly detection for streaming data. In Twenty-Second International Joint Conference on Artificial Intelligence. \u21a9

    "},{"location":"api/anomaly/LocalOutlierFactor/","title":"LocalOutlierFactor","text":"

    Incremental Local Outlier Factor (Incremental LOF).

    The Incremental Local Outlier Factor (ILOF) is an online version of the Local Outlier Factor (LOF), proposed by Pokrajac et al. (2007), and is used to identify outliers based on the density of their local neighbors.

    The algorithm takes into account the following elements:

    • NewPoints: the new points;
    • kNN(p): the k-nearest neighbors of p (the k closest points to p);
    • RkNN(p): the reverse k-nearest neighbors of p (the points that have p as one of their neighbors);
    • set_upd_lrd: the set of points whose local reachability distance needs to be updated;
    • set_upd_lof: the set of points whose local outlier factor needs to be updated.

    The current implementation within River, based on the original one in the paper, follows these steps:

    1. Insert the new data points (NewPoints) and calculate their distances to the existing points;
    2. Update the nearest neighbors and reverse nearest neighbors of all the points;
    3. Define the sets of affected points that require updates;
    4. Calculate the reachability distance from the new points to their neighbors (NewPoints -> kNN(NewPoints)) and from the reverse neighbors to the new points (RkNN(NewPoints) -> NewPoints);
    5. Update the reachability distance of the affected points: RkNN(RkNN(NewPoints)) -> RkNN(NewPoints);
    6. Update the local reachability distance of the affected points: lrd(set_upd_lrd);
    7. Update the local outlier factor: lof(set_upd_lof).

    The incremental LOF algorithm is expected to provide detection performance equivalent to re-running the static LOF algorithm after the insertion of each data record, while requiring significantly less computation time. Moreover, the insertion of a new data point, as well as the deletion of an old one, only influences a limited number of its closest neighbors, so the number of updates per insertion/deletion does not depend on the total number of instances in the data set.

    "},{"location":"api/anomaly/LocalOutlierFactor/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type \u2192 int

      Default \u2192 10

      The number of nearest neighbors to use for density estimation.

    • distance_func

      Type \u2192 DistanceFunc

      Default \u2192 None

      Distance function to be used. By default, the Euclidean distance is used.

    "},{"location":"api/anomaly/LocalOutlierFactor/#attributes","title":"Attributes","text":"
    • x_list

      A list of stored observations.

    • x_batch

      A buffer to hold incoming observations until it's time to update the model.

    • x_scores

      A buffer to hold incoming observations until it's time to score them.

    • dist_dict

      A dictionary to hold distances between observations.

    • neighborhoods

      A dictionary to hold neighborhoods for each observation.

    • rev_neighborhoods

      A dictionary to hold reverse neighborhoods for each observation.

    • k_dist

      A dictionary to hold k-distances for each observation.

    • reach_dist

      A dictionary to hold reachability distances for each observation.

    • lof

      A dictionary to hold Local Outlier Factors for each observation.

    • local_reach

      A dictionary to hold local reachability distances for each observation.

    "},{"location":"api/anomaly/LocalOutlierFactor/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import anomaly\nfrom river import datasets\n\ncc_df = pd.DataFrame(datasets.CreditCard())\n\nlof = anomaly.LocalOutlierFactor(n_neighbors=20)\n\nfor x, _ in datasets.CreditCard().take(200):\n    lof.learn_one(x)\n\nlof.learn_many(cc_df[201:401])\n\nscores = []\nfor x in cc_df[0][401:406]:\n    scores.append(lof.score_one(x))\n\n[round(score, 3) for score in scores]\n
    [1.802, 1.937, 1.567, 1.181, 1.28]\n

    "},{"location":"api/anomaly/LocalOutlierFactor/#methods","title":"Methods","text":"learn learn_many learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x \u2014 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    David Pokrajac, Aleksandar Lazarevic, and Longin Jan Latecki (2007). Incremental Local Outlier Detection for Data Streams. In: Proceedings of the 2007 IEEE Symposium on Computational Intelligence and Data Mining (CIDM 2007). 504-515. DOI: 10.1109/CIDM.2007.368917.

    "},{"location":"api/anomaly/OneClassSVM/","title":"OneClassSVM","text":"

    One-class SVM for anomaly detection.

    This is a stochastic implementation of the one-class SVM algorithm, and will not exactly match its batch formulation.

    It is encouraged to scale the data upstream with preprocessing.StandardScaler, and to use feature_extraction.RBFSampler to capture non-linearities.

    "},{"location":"api/anomaly/OneClassSVM/#parameters","title":"Parameters","text":"
    • nu

      Default \u2192 0.1

      An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. You can think of it as the expected fraction of anomalies.

    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

    Learning rate scheduler used for updating the intercept. An optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.base.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/anomaly/OneClassSVM/#attributes","title":"Attributes","text":"
    • weights
    "},{"location":"api/anomaly/OneClassSVM/#examples","title":"Examples","text":"

    from river import anomaly\nfrom river import compose\nfrom river import datasets\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = anomaly.QuantileFilter(\n    anomaly.OneClassSVM(nu=0.2),\n    q=0.995\n)\n\nauc = metrics.ROCAUC()\n\nfor x, y in datasets.CreditCard().take(2500):\n    score = model.score_one(x)\n    is_anomaly = model.classify(score)\n    model = model.learn_one(x)\n    auc = auc.update(y, is_anomaly)\n\nauc\n
    ROCAUC: 74.68%\n

    You can also use the evaluate.progressive_val_score function to evaluate the model on a data stream.

    from river import evaluate\n\nmodel = model.clone()\n\nevaluate.progressive_val_score(\n    dataset=datasets.CreditCard().take(2500),\n    model=model,\n    metric=metrics.ROCAUC(),\n    print_every=1000\n)\n
    [1,000] ROCAUC: 74.40%\n[2,000] ROCAUC: 74.60%\n[2,500] ROCAUC: 74.68%\nROCAUC: 74.68%\n

    "},{"location":"api/anomaly/OneClassSVM/#methods","title":"Methods","text":"learn_many learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/QuantileFilter/","title":"QuantileFilter","text":"

    Quantile anomaly filter.

    "},{"location":"api/anomaly/QuantileFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      An anomaly detector.

    • q

      Type \u2192 float

      The quantile level above which to classify an anomaly score as anomalous.

    • protect_anomaly_detector

      Default \u2192 True

    Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated: if it keeps learning from anomalous observations, it will slowly start to consider anomalous scores as normal. This might nonetheless be desirable, for instance when the data contains drift.

    "},{"location":"api/anomaly/QuantileFilter/#attributes","title":"Attributes","text":"
    • q
    "},{"location":"api/anomaly/QuantileFilter/#examples","title":"Examples","text":"

    from river import anomaly\nfrom river import compose\nfrom river import datasets\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.MinMaxScaler(),\n    anomaly.QuantileFilter(\n        anomaly.HalfSpaceTrees(seed=42),\n        q=0.95\n    )\n)\n\nreport = metrics.ClassificationReport()\n\nfor x, y in datasets.CreditCard().take(2000):\n    score = model.score_one(x)\n    is_anomaly = model['QuantileFilter'].classify(score)\n    model = model.learn_one(x)\n    report = report.update(y, is_anomaly)\n\nreport\n
                   Precision   Recall   F1       Support\n<BLANKLINE>\n       0      99.95%   94.49%   97.14%      1998\n       1       0.90%   50.00%    1.77%         2\n<BLANKLINE>\n   Macro      50.42%   72.25%   49.46%\n   Micro      94.45%   94.45%   94.45%\nWeighted      99.85%   94.45%   97.05%\n<BLANKLINE>\n                 94.45% accuracy\n

    "},{"location":"api/anomaly/QuantileFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score \u2014 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/ThresholdFilter/","title":"ThresholdFilter","text":"

    Threshold anomaly filter.

    "},{"location":"api/anomaly/ThresholdFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      An anomaly detector.

    • threshold

      Type \u2192 float

      A threshold above which to classify an anomaly score as anomalous.

    • protect_anomaly_detector

      Default \u2192 True

    Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated: if it keeps learning from anomalous observations, it will slowly start to consider anomalous scores as normal. This might nonetheless be desirable, for instance when the data contains drift.

    "},{"location":"api/anomaly/ThresholdFilter/#examples","title":"Examples","text":"

    Anomaly filters can be used as part of a pipeline. For instance, we might want to filter out anomalous observations so as not to corrupt a supervised model. As an example, let's take the datasets.WaterFlow dataset. Some of the samples have anomalous target variables because of human interventions. We don't want our model to learn these values.

    from river import anomaly\nfrom river import datasets\nfrom river import metrics\nfrom river import time_series\n\ndataset = datasets.WaterFlow()\nmetric = metrics.SMAPE()\n\nperiod = 24  # 24 samples per day\n\nmodel = (\n    anomaly.ThresholdFilter(\n        anomaly.GaussianScorer(\n            window_size=period * 7,  # 7 days\n            grace_period=30\n        ),\n        threshold=0.995\n    ) |\n    time_series.HoltWinters(\n        alpha=0.3,\n        beta=0.1,\n        multiplicative=False\n    )\n)\n\ntime_series.evaluate(\n    dataset,\n    model,\n    metric,\n    horizon=period\n)\n
    +1  SMAPE: 4.220171\n+2  SMAPE: 4.322648\n+3  SMAPE: 4.418546\n+4  SMAPE: 4.504986\n+5  SMAPE: 4.57924\n+6  SMAPE: 4.64123\n+7  SMAPE: 4.694042\n+8  SMAPE: 4.740753\n+9  SMAPE: 4.777291\n+10 SMAPE: 4.804558\n+11 SMAPE: 4.828114\n+12 SMAPE: 4.849823\n+13 SMAPE: 4.865871\n+14 SMAPE: 4.871972\n+15 SMAPE: 4.866274\n+16 SMAPE: 4.842614\n+17 SMAPE: 4.806214\n+18 SMAPE: 4.763355\n+19 SMAPE: 4.713455\n+20 SMAPE: 4.672062\n+21 SMAPE: 4.659102\n+22 SMAPE: 4.693496\n+23 SMAPE: 4.773707\n+24 SMAPE: 4.880654\n

    "},{"location":"api/anomaly/ThresholdFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score \u2014 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/AnomalyDetector/","title":"AnomalyDetector","text":"

    An anomaly detector.

    "},{"location":"api/anomaly/base/AnomalyDetector/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'

    Returns

    AnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x \u2014 'dict'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/AnomalyFilter/","title":"AnomalyFilter","text":"

    Anomaly filter base class.

    An anomaly filter has the ability to classify an anomaly score as anomalous or not. It can then be used to filter anomalies, in particular as part of a pipeline.

    "},{"location":"api/anomaly/base/AnomalyFilter/#parameters","title":"Parameters","text":"
    • anomaly_detector

      Type \u2192 AnomalyDetector

      An anomaly detector wrapped by the anomaly filter.

    • protect_anomaly_detector

      Default \u2192 True

    Indicates whether or not the anomaly detector should be updated when the anomaly score is anomalous. If the data contains sporadic anomalies, then the anomaly detector should likely not be updated: if it keeps learning from anomalous observations, it will slowly start to consider anomalous scores as normal. This might nonetheless be desirable, for instance when the data contains drift.

    "},{"location":"api/anomaly/base/AnomalyFilter/#methods","title":"Methods","text":"classify

    Classify an anomaly score as anomalous or not.

    Parameters

    • score \u2014 'float'

    Returns

    bool: A boolean value indicating whether the anomaly score is anomalous or not.

    learn_one

    Update the anomaly filter and the underlying anomaly detector.

    Parameters

    • args
    • learn_kwargs

    Returns

    self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • args
    • kwargs

    Returns

    An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/anomaly/base/SupervisedAnomalyDetector/","title":"SupervisedAnomalyDetector","text":"

    A supervised anomaly detector.

    "},{"location":"api/anomaly/base/SupervisedAnomalyDetector/#methods","title":"Methods","text":"learn_one

    Update the model.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedAnomalyDetector: self

    score_one

    Return an outlier score.

    A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    float: An anomaly score. A high score is indicative of an anomaly. A low score corresponds to a normal observation.

    "},{"location":"api/bandit/BayesUCB/","title":"BayesUCB","text":"

    Bayes-UCB bandit policy.

    Bayes-UCB is a Bayesian algorithm for the multi-armed bandit problem. It uses the posterior distribution of the reward of each arm to compute an upper confidence bound (UCB) on the expected reward of each arm. The arm with the highest UCB is then pulled. The posterior distribution is updated after each pull. The algorithm is described in [^1].

    "},{"location":"api/bandit/BayesUCB/#parameters","title":"Parameters","text":"
    • reward_obj

      Default \u2192 None

      The reward object that is used to update the posterior distribution.

    • burn_in

      Default \u2192 0

      Number of initial observations per arm before using the posterior distribution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/BayesUCB/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/BayesUCB/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import proba\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.BayesUCB(seed=123)\n\nmetric = stats.Sum()\nwhile True:\n    action = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(action)\n    policy = policy.update(action, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 841.\n

    "},{"location":"api/bandit/BayesUCB/#methods","title":"Methods","text":"compute_index

    Compute the p-th quantile of the beta distribution for the arm.

    Parameters

    • arm_id

    pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state with the observed reward.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/EpsilonGreedy/","title":"EpsilonGreedy","text":"

    \\(\\varepsilon\\)-greedy bandit policy.

    Performs arm selection using an \(\varepsilon\)-greedy bandit strategy. At each step, the best arm so far is exploited with probability \(1 - \varepsilon\), while an arm is picked uniformly at random with probability \(\varepsilon\).

    Selection bias is a common problem when using bandits. It can be mitigated by using a burn-in phase: each model is given the chance to learn during the first burn_in steps.
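    A hedged, self-contained sketch of the selection rule described above (not River's internal implementation):

    import random

    def epsilon_greedy_choice(arm_ids, best_arm, epsilon, rng):
        # With probability epsilon, explore an arm uniformly at random;
        # otherwise exploit the arm that currently looks best.
        if rng.random() < epsilon:
            return rng.choice(list(arm_ids))
        return best_arm

    rng = random.Random(42)
    arm = epsilon_greedy_choice(range(10), best_arm=3, epsilon=0.1, rng=rng)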

    "},{"location":"api/bandit/EpsilonGreedy/#parameters","title":"Parameters","text":"
    • epsilon

      Type \u2192 float

      The probability of exploring.

    • decay

      Default \u2192 0.0

      The decay rate of epsilon.

    • reward_obj

      Default \u2192 None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/EpsilonGreedy/#attributes","title":"Attributes","text":"
    • current_epsilon

      The value of epsilon after factoring in the decay rate.

    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/EpsilonGreedy/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.EpsilonGreedy(epsilon=0.9, seed=101)\n\nmetric = stats.Sum()\nwhile True:\n    arm = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(arm)\n    policy = policy.update(arm, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 775.\n

    "},{"location":"api/bandit/EpsilonGreedy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. \u03b5-Greedy Algorithm - The Multi-Armed Bandit Problem and Its Solutions - Lilian Weng \u21a9

    "},{"location":"api/bandit/Exp3/","title":"Exp3","text":"

    Exp3 bandit policy.

    This policy works by maintaining a weight for each arm. These weights are used to randomly decide which arm to pull. The weights are increased or decreased, depending on the reward. An egalitarianism factor \\(\\gamma \\in [0, 1]\\) is included, to tune the desire to pick an arm uniformly at random. That is, if \\(\\gamma = 1\\), the arms are picked uniformly at random.
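    For reference, in the standard formulation of Auer et al. (cited below), arm \(i\) is drawn with probability

    \[p_i = (1 - \gamma) \frac{w_i}{\sum_{j=1}^{K} w_j} + \frac{\gamma}{K}\]

    where \(K\) is the number of arms and \(w_i\) the weight of arm \(i\). Setting \(\gamma = 1\) indeed yields the uniform distribution. River's implementation may differ in minor details.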

    "},{"location":"api/bandit/Exp3/#parameters","title":"Parameters","text":"
    • gamma

      Type \u2192 float

      The egalitarianism factor. Setting this to 0 leads to what is called the EXP3 policy.

    • reward_obj

      Default \u2192 None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Default \u2192 None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/Exp3/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/Exp3/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import proba\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.Exp3(gamma=0.5, seed=42)\n\nmetric = stats.Sum()\nwhile True:\n    action = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(action)\n    policy = policy.update(action, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 799.\n

    "},{"location":"api/bandit/Exp3/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. Auer, P., Cesa-Bianchi, N., Freund, Y. and Schapire, R.E., 2002. The nonstochastic multiarmed bandit problem. SIAM journal on computing, 32(1), pp.48-77. \u21a9

    2. Adversarial Bandits and the Exp3 Algorithm \u2014 Jeremy Kun \u21a9

    "},{"location":"api/bandit/LinUCBDisjoint/","title":"LinUCBDisjoint","text":"

    LinUCB, disjoint variant.

    Although it works, it is currently too slow to be used realistically in practice.

    The way this works is that each arm is assigned a linear_model.BayesianLinearRegression instance. This instance is updated every time the arm is pulled. The context is used as features for the regression. The reward is used as the target. The posterior distribution is used to compute the upper confidence bound. The arm with the highest upper confidence bound is pulled.
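    No example is given on this page, so here is a hedged sketch that evaluates the policy offline on the contextual NewsArticles dataset via replay, assuming evaluate_offline forwards the context to contextual policies as it does for the datasets shown on its page. Note the caveat above about speed.

    from river import bandit

    policy = bandit.LinUCBDisjoint(alpha=1.0, beta=1.0, seed=42)

    total_reward, n_samples_used = bandit.evaluate_offline(
        policy=policy,
        history=bandit.datasets.NewsArticles(),  # 100 context features, 10 arms
    )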

    "},{"location":"api/bandit/LinUCBDisjoint/#parameters","title":"Parameters","text":"
    • alpha

      Type \u2192 float

      Default \u2192 1.0

      Parameter used in each Bayesian linear regression.

    • beta

      Type \u2192 float

      Default \u2192 1.0

      Parameter used in each Bayesian linear regression.

    • smoothing

      Type \u2192 float | None

      Default \u2192 None

      Parameter used in each Bayesian linear regression.

    • reward_obj

      Default \u2192 None

      The reward object used to measure the performance of each arm.

    • burn_in

      Default \u2192 0

      The number of time steps during which each arm is pulled once.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/LinUCBDisjoint/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/LinUCBDisjoint/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'
    • context \u2014 'dict' \u2014 defaults to None

    Returns

    ArmID: A single arm.

    update

    Update an arm's state with the observed context and reward.

    Parameters

    • arm_id
    • context
    • reward_args
    • reward_kwargs

    1. A Contextual-Bandit Approach to Personalized News Article Recommendation \u21a9

    2. Contextual Bandits Analysis of LinUCB Disjoint Algorithm with Dataset \u21a9

    "},{"location":"api/bandit/RandomPolicy/","title":"RandomPolicy","text":"

    Random bandit policy.

    This policy simply pulls a random arm at each time step. It is useful as a baseline.

    "},{"location":"api/bandit/RandomPolicy/#parameters","title":"Parameters","text":"
    • reward_obj

      Default \u2192 None

      The reward object that is used to update the posterior distribution.

    • burn_in

      Default \u2192 0

      Number of initial observations per arm before using the posterior distribution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/RandomPolicy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/RandomPolicy/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import proba\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.RandomPolicy(seed=123)\n\nmetric = stats.Sum()\nwhile True:\n    action = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(action)\n    policy = policy.update(action, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 755.\n

    "},{"location":"api/bandit/RandomPolicy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/ThompsonSampling/","title":"ThompsonSampling","text":"

    Thompson sampling.

    Thompson sampling is often used with a Beta distribution. However, any probability distribution can be used, as long as it makes sense with the reward shape. For instance, a Beta distribution is meant to be used with binary rewards, while a Gaussian distribution is meant to be used with continuous rewards.

    The randomness of a distribution is controlled by its seed. The seed should not be set within the distribution; it should rather be defined in the policy parametrization. In other words, you should do this:

    policy = ThompsonSampling(dist=proba.Beta(1, 1), seed=42) \n

    and not this:

    policy = ThompsonSampling(dist=proba.Beta(1, 1, seed=42)) \n
    "},{"location":"api/bandit/ThompsonSampling/#parameters","title":"Parameters","text":"
    • reward_obj

      Type \u2192 proba.base.Distribution

      Default \u2192 None

      A distribution to sample from.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/ThompsonSampling/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/ThompsonSampling/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import proba\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.ThompsonSampling(reward_obj=proba.Beta(), seed=101)\n\nmetric = stats.Sum()\nwhile True:\n    arm = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(arm)\n    policy = policy.update(arm, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 820.\n

    "},{"location":"api/bandit/ThompsonSampling/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. An Empirical Evaluation of Thompson Sampling \u21a9

    "},{"location":"api/bandit/UCB/","title":"UCB","text":"

    Upper Confidence Bound (UCB) bandit policy.

    Due to the nature of this algorithm, it's recommended to scale the target so that it exhibits sub-gaussian properties. This can be done by passing a preprocessing.TargetStandardScaler instance to the reward_scaler argument.
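    For intuition, the classic UCB1 index (see the references below) pulls the arm maximizing

    \[\bar{x}_i + \sqrt{\frac{2 \ln t}{n_i}}\]

    where \(\bar{x}_i\) is the average reward of arm \(i\), \(n_i\) is the number of times it has been pulled, and \(t\) is the total number of pulls so far. In this implementation, delta presumably scales the exploration term, which is consistent with the note below that setting it to 1 recovers UCB1.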

    "},{"location":"api/bandit/UCB/#parameters","title":"Parameters","text":"
    • delta

      Type \u2192 float

      The confidence level. Setting this to 1 leads to what is called the UCB1 policy.

    • reward_obj

      Default \u2192 None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Default \u2192 None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    • seed

      Type \u2192 int

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/bandit/UCB/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/UCB/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\nfrom river import preprocessing\nfrom river import stats\n\nenv = gym.make(\n    'river_bandits/CandyCaneContest-v0'\n)\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\npolicy = bandit.UCB(\n    delta=100,\n    reward_scaler=preprocessing.TargetStandardScaler(None),\n    seed=42\n)\n\nmetric = stats.Sum()\nwhile True:\n    arm = policy.pull(range(env.action_space.n))\n    observation, reward, terminated, truncated, info = env.step(arm)\n    policy = policy.update(arm, reward)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 744.\n

    "},{"location":"api/bandit/UCB/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    1. Lai, T. L., & Robbins, H. (1985). Asymptotically efficient adaptive allocation rules. Advances in applied mathematics, 6(1), 4-22. \u21a9

    2. Upper Confidence Bounds - The Multi-Armed Bandit Problem and Its Solutions - Lilian Weng \u21a9

    3. The Upper Confidence Bound Algorithm - Bandit Algorithms \u21a9

    "},{"location":"api/bandit/evaluate-offline/","title":"evaluate_offline","text":"

    Evaluate a policy on historical logs using replay.

    This is a high-level utility function for evaluating a policy using the replay methodology. This methodology is an off-policy evaluation method. It does not require an environment, and is instead data-driven.

    At each step, an arm is pulled from the provided policy. If the arm is the same as the arm that was pulled in the historical data, the reward is used to update the policy. If the arm is different, the reward is ignored. This is the off-policy aspect of the evaluation.
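    A hedged sketch of the replay loop just described, as a simplification rather than River's actual implementation:

    def replay(policy, history):
        # Off-policy evaluation by replay: a reward is only revealed (and the
        # policy only updated) when the policy's choice matches the logged arm.
        total_reward, n_used = 0.0, 0
        for arms, context, logged_arm, reward in history:
            chosen = policy.pull(arms)  # the context is ignored here for simplicity
            if chosen == logged_arm:
                policy = policy.update(chosen, reward)
                total_reward += reward
                n_used += 1
        return total_reward, n_used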

    "},{"location":"api/bandit/evaluate-offline/#parameters","title":"Parameters","text":"
    • policy

      Type \u2192 bandit.base.Policy

      The policy to evaluate.

    • history

      Type \u2192 History | bandit.datasets.BanditDataset

      The history of the bandit problem. This is a generator that yields tuples of the form (arms, context, arm, reward).

    • reward_stat

      Type \u2192 stats.base.Univariate

      Default \u2192 None

      The reward statistic to use. Defaults to stats.Sum.

    "},{"location":"api/bandit/evaluate-offline/#examples","title":"Examples","text":"

    import random\nfrom river import bandit\n\nrng = random.Random(42)\narms = ['A', 'B', 'C']\nclicks = [\n    (\n        arms,\n        # no context\n        None,\n        # random arm\n        rng.choice(arms),\n        # reward\n        rng.random() > 0.5\n    )\n    for _ in range(1000)\n]\n\ntotal_reward, n_samples_used = bandit.evaluate_offline(\n    policy=bandit.EpsilonGreedy(0.1, seed=42),\n    history=clicks,\n)\n\ntotal_reward\n
    Sum: 172.\n

    n_samples_used\n
    321\n

    This also works out of the box with datasets that inherit from river.bandit.BanditDataset.

    news = bandit.datasets.NewsArticles()\ntotal_reward, n_samples_used = bandit.evaluate_offline(\n    policy=bandit.RandomPolicy(seed=42),\n    history=news,\n)\n\ntotal_reward, n_samples_used\n
    (Sum: 105., 1027)\n

    As expected, the random policy's choice matches the logged arm in roughly 10% of cases. Indeed, there are 10 arms and 10,000 samples, so the expected number of matches is 1,000.

    1. Offline Evaluation of Multi-Armed Bandit Algorithms in Python using Replay \u21a9

    2. Unbiased Offline Evaluation of Contextual-bandit-based News Article Recommendation Algorithms \u21a9

    3. Understanding Inverse Propensity Score for Contextual Bandits \u21a9

    "},{"location":"api/bandit/evaluate/","title":"evaluate","text":"

    Benchmark a list of policies on a given Gym environment.

    This is a high-level utility function for benchmarking a list of policies on a given Gym environment. For example, it can be used to populate a pandas.DataFrame with the contents of each step of each episode.

    "},{"location":"api/bandit/evaluate/#parameters","title":"Parameters","text":"
    • policies

      Type \u2192 list[bandit.base.Policy]

      A list of policies to evaluate. The policy will be reset before each episode.

    • env

      Type \u2192 gym.Env

      The Gym environment to use. One copy will be made for each policy at the beginning of each episode.

    • reward_stat

      Type \u2192 stats.base.Univariate | None

      Default \u2192 None

      A univariate statistic to keep track of the rewards. This statistic will be reset before each episode. Note that this is not the same as the reward object used by the policies. It's just a statistic to keep track of each policy's performance. If None, stats.Sum is used.

    • n_episodes

      Type \u2192 int

      Default \u2192 20

      The number of episodes to run.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility. A random number generator will be used to seed differently the environment before each episode.

    "},{"location":"api/bandit/evaluate/#examples","title":"Examples","text":"

    import gym\nfrom river import bandit\n\ntrace = bandit.evaluate(\n    policies=[\n        bandit.UCB(delta=1, seed=42),\n        bandit.EpsilonGreedy(epsilon=0.1, seed=42),\n    ],\n    env=gym.make(\n        'river_bandits/CandyCaneContest-v0',\n        max_episode_steps=100\n    ),\n    n_episodes=5,\n    seed=42\n)\n\nfor step in trace:\n    print(step)\n    break\n
    {'episode': 0, 'step': 0, 'policy_idx': 0, 'arm': 81, 'reward': 0.0, 'reward_stat': 0.0}\n

    The return type of this function is a generator. Each step of the generator is a dictionary. You can pass the generator to a pandas.DataFrame to get a nice representation of the results.

    import pandas as pd\n\ntrace = bandit.evaluate(\n    policies=[\n        bandit.UCB(delta=1, seed=42),\n        bandit.EpsilonGreedy(epsilon=0.1, seed=42),\n    ],\n    env=gym.make(\n        'river_bandits/CandyCaneContest-v0',\n        max_episode_steps=100\n    ),\n    n_episodes=5,\n    seed=42\n)\n\ntrace_df = pd.DataFrame(trace)\ntrace_df.sample(5, random_state=42)\n
         episode  step  policy_idx  arm  reward  reward_stat\n521        2    60           1   25     0.0         36.0\n737        3    68           1   40     1.0         20.0\n740        3    70           0   58     0.0         36.0\n660        3    30           0   31     1.0         16.0\n411        2     5           1   35     1.0          5.0\n

    The length of the dataframe is the number of policies times the number of episodes times the maximum number of steps per episode.

    len(trace_df)\n
    1000\n

    (\n    trace_df.policy_idx.nunique() *\n    trace_df.episode.nunique() *\n    trace_df.step.nunique()\n)\n
    1000\n

    "},{"location":"api/bandit/base/ContextualPolicy/","title":"ContextualPolicy","text":"

    Contextual bandit policy base class.

    "},{"location":"api/bandit/base/ContextualPolicy/#parameters","title":"Parameters","text":"
    • reward_obj

      Type \u2192 RewardObj | None

      Default \u2192 None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Type \u2192 compose.TargetTransformRegressor | None

      Default \u2192 None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    "},{"location":"api/bandit/base/ContextualPolicy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/base/ContextualPolicy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'
    • context \u2014 'dict' \u2014 defaults to None

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • context
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/base/Policy/","title":"Policy","text":"

    Bandit policy base class.

    "},{"location":"api/bandit/base/Policy/#parameters","title":"Parameters","text":"
    • reward_obj

      Type \u2192 RewardObj | None

      Default \u2192 None

      The reward object used to measure the performance of each arm. This can be a metric, a statistic, or a distribution.

    • reward_scaler

      Type \u2192 compose.TargetTransformRegressor | None

      Default \u2192 None

      A reward scaler used to scale the rewards before they are fed to the reward object. This can be useful to scale the rewards to a (0, 1) range for instance.

    • burn_in

      Default \u2192 0

      The number of steps to use for the burn-in phase. Each arm is given the chance to be pulled during the burn-in phase. This is useful to mitigate selection bias.

    "},{"location":"api/bandit/base/Policy/#attributes","title":"Attributes","text":"
    • ranking

      Return the list of arms in descending order of performance.

    "},{"location":"api/bandit/base/Policy/#methods","title":"Methods","text":"pull

    Pull arm(s).

    This method is a generator that yields the arm(s) that should be pulled. During the burn-in phase, all the arms that have not been pulled enough times are yielded. Once the burn-in phase is over, the policy is allowed to choose the arm(s) that should be pulled. If you only want to pull one arm at a time during the burn-in phase, simply call next(policy.pull(arms)).

    Parameters

    • arm_ids \u2014 'list[ArmID]'

    Returns

    ArmID: A single arm.

    update

    Update an arm's state.

    Parameters

    • arm_id
    • reward_args
    • reward_kwargs

    "},{"location":"api/bandit/datasets/BanditDataset/","title":"BanditDataset","text":"

    Base class for bandit datasets.

    "},{"location":"api/bandit/datasets/BanditDataset/#parameters","title":"Parameters","text":"
    • n_features

      Number of features in the dataset.

    • n_samples

      Default \u2192 None

      Number of samples in the dataset.

    • n_classes

      Default \u2192 None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default \u2192 None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default \u2192 False

      Whether the dataset is sparse or not.

    "},{"location":"api/bandit/datasets/BanditDataset/#attributes","title":"Attributes","text":"
    • arms

      The list of arms that can be pulled.

    • desc

      Return the description from the docstring.

    "},{"location":"api/bandit/datasets/BanditDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/bandit/datasets/NewsArticles/","title":"NewsArticles","text":"

    News articles bandit dataset.

    This is a personalization dataset. It contains 10000 observations. There are 10 arms, and the reward is binary. There are 100 features, which turns this into a contextual bandit problem.

    "},{"location":"api/bandit/datasets/NewsArticles/#attributes","title":"Attributes","text":"
    • arms

      The list of arms that can be pulled.

    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/bandit/datasets/NewsArticles/#examples","title":"Examples","text":"

    from river import bandit\n\ndataset = bandit.datasets.NewsArticles()\ncontext, arm, reward = next(iter(dataset))\n\nlen(context)\n
    100\n

    arm, reward\n
    (2, False)\n

    "},{"location":"api/bandit/datasets/NewsArticles/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Machine Learning for Personalization homework \u21a9

    2. Contextual Bandits Analysis of LinUCB Disjoint Algorithm with Dataset \u21a9

    "},{"location":"api/bandit/envs/CandyCaneContest/","title":"CandyCaneContest","text":"

    Candy cane contest Kaggle competition.

    "},{"location":"api/bandit/envs/CandyCaneContest/#parameters","title":"Parameters","text":"
    • n_machines

      Default \u2192 100

      Number of vending machines.

    • reward_decay

      Default \u2192 0.03

      The multiplicative rate at which the expected reward of each vending machine decays.

    "},{"location":"api/bandit/envs/CandyCaneContest/#attributes","title":"Attributes","text":"
    • np_random

      Returns the environment's internal :attr:_np_random, initialising it with a random seed if it is not already set.

    • render_mode

    • spec

    • unwrapped

      Returns the base non-wrapped environment, i.e. the underlying gym.Env instance.

    "},{"location":"api/bandit/envs/CandyCaneContest/#examples","title":"Examples","text":"

    import gym\nfrom river import stats\n\nenv = gym.make('river_bandits/CandyCaneContest-v0')\n_ = env.reset(seed=42)\n_ = env.action_space.seed(123)\n\nmetric = stats.Sum()\nwhile True:\n    arm = env.action_space.sample()\n    observation, reward, terminated, truncated, info = env.step(arm)\n    metric = metric.update(reward)\n    if terminated or truncated:\n        break\n\nmetric\n
    Sum: 734.\n

    "},{"location":"api/bandit/envs/CandyCaneContest/#methods","title":"Methods","text":"close

    Override close in your subclass to perform any necessary cleanup.

    Environments will automatically :meth:close() themselves when garbage collected or when the program exits.

    render

    Compute the render frames as specified by render_mode attribute during initialization of the environment.

    The set of supported modes varies per environment, and some third-party environments may not support rendering at all. By convention, if render_mode is:

    • None (default): no render is computed.

    • human: render returns None; the environment is continuously rendered in the current display or terminal, usually for human consumption.

    • rgb_array: return a single frame representing the current state of the environment. A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image.

    • rgb_array_list: return a list of frames representing the states of the environment since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3), as with rgb_array.

    • ansi: return a string (str) or StringIO.StringIO containing a terminal-style text representation for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors).

    Note: make sure that your class's metadata 'render_modes' key includes the list of supported modes. It's recommended to call super() in implementations to use the functionality of this method.

    reset

    Resets the environment to an initial state and returns the initial observation.

    This method can reset the environment's random number generator(s) if seed is an integer, or if the environment has not yet initialized a random number generator. If the environment already has a random number generator and :meth:reset is called with seed=None, the RNG should not be reset. Moreover, :meth:reset should (in the typical use case) be called with an integer seed right after initialization and then never again.

    Args:

    • seed (optional int): The seed used to initialize the environment's PRNG. If the environment does not already have a PRNG and seed=None (the default option) is passed, a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and seed=None is passed, the PRNG will not be reset. If you pass an integer, the PRNG will be reset even if it already exists. Usually, you want to pass an integer right after the environment has been initialized and then never again.

    • options (optional dict): Additional information to specify how the environment is reset (optional, depending on the specific environment).

    Returns:

    • observation (object): Observation of the initial state. This will be an element of :attr:observation_space (typically a numpy array) and is analogous to the observation returned by :meth:step.

    • info (dictionary): This dictionary contains auxiliary information complementing observation. It should be analogous to the info returned by :meth:step.

    Parameters

    • seed \u2014 Optional[int] \u2014 defaults to None
    • options \u2014 Optional[dict] \u2014 defaults to None

    step

    Run one timestep of the environment's dynamics.

    When the end of an episode is reached, you are responsible for calling :meth:reset to reset this environment's state. Accepts an action and returns a tuple (observation, reward, terminated, truncated, info).

    Args:

    • action (ActType): an action provided by the agent.

    Returns:

    • observation (object): an element of the environment's :attr:observation_space. This may, for instance, be a numpy array containing the positions and velocities of certain objects.

    • reward (float): the amount of reward returned as a result of taking the action.

    • terminated (bool): whether a terminal state (as defined under the MDP of the task) is reached. In this case further step() calls could return undefined results.

    • truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. Typically a time limit, but could also be used to indicate the agent physically going out of bounds. Can be used to end the episode prematurely before a terminal state is reached.

    • info (dictionary): contains auxiliary diagnostic information (helpful for debugging, learning, and logging). This might, for instance, contain: metrics that describe the agent's performance state, variables that are hidden from observations, or individual reward terms that are combined to produce the total reward. It can also contain information that distinguishes truncation and termination; however this is deprecated in favour of returning two booleans, and will be removed in a future version.

    • (deprecated) done (bool): A boolean value for whether the episode has ended, in which case further :meth:step calls will return undefined results. A done signal may be emitted for different reasons: maybe the task underlying the environment was solved successfully, a certain time limit was exceeded, or the physics simulation has entered an invalid state.

    Parameters

    • machine_index

    1. Santa 2020 - The Candy Cane Contest \u21a9

    "},{"location":"api/bandit/envs/KArmedTestbed/","title":"KArmedTestbed","text":"

    k-armed testbed.

    This is a simple environment that can be used to test bandit algorithms. It is based on the 10 armed testbed described in the book \"Reinforcement Learning: An Introduction\" by Sutton and Barto.

    "},{"location":"api/bandit/envs/KArmedTestbed/#parameters","title":"Parameters","text":"
    • k

      Type \u2192 int

      Default \u2192 10

      Number of arms.

    "},{"location":"api/bandit/envs/KArmedTestbed/#attributes","title":"Attributes","text":"
    • np_random

      Returns the environment's internal :attr:_np_random, initialising it with a random seed if it is not already set.

    • render_mode

    • spec

    • unwrapped

      Returns the base non-wrapped environment, i.e. the underlying gym.Env instance.

    "},{"location":"api/bandit/envs/KArmedTestbed/#methods","title":"Methods","text":"close

    Override close in your subclass to perform any necessary cleanup.

    Environments will automatically :meth:close() themselves when garbage collected or when the program exits.

    render

    Compute the render frames as specified by render_mode attribute during initialization of the environment.

    The set of supported modes varies per environment, and some third-party environments may not support rendering at all. By convention, if render_mode is:

    • None (default): no render is computed.

    • human: render returns None; the environment is continuously rendered in the current display or terminal, usually for human consumption.

    • rgb_array: return a single frame representing the current state of the environment. A frame is a numpy.ndarray with shape (x, y, 3) representing RGB values for an x-by-y pixel image.

    • rgb_array_list: return a list of frames representing the states of the environment since the last reset. Each frame is a numpy.ndarray with shape (x, y, 3), as with rgb_array.

    • ansi: return a string (str) or StringIO.StringIO containing a terminal-style text representation for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors).

    Note: make sure that your class's metadata 'render_modes' key includes the list of supported modes. It's recommended to call super() in implementations to use the functionality of this method.

    reset

    Resets the environment to an initial state and returns the initial observation.

    This method can reset the environment's random number generator(s) if seed is an integer, or if the environment has not yet initialized a random number generator. If the environment already has a random number generator and :meth:reset is called with seed=None, the RNG should not be reset. Moreover, :meth:reset should (in the typical use case) be called with an integer seed right after initialization and then never again.

    Args:

    • seed (optional int): The seed used to initialize the environment's PRNG. If the environment does not already have a PRNG and seed=None (the default option) is passed, a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and seed=None is passed, the PRNG will not be reset. If you pass an integer, the PRNG will be reset even if it already exists. Usually, you want to pass an integer right after the environment has been initialized and then never again.

    • options (optional dict): Additional information to specify how the environment is reset (optional, depending on the specific environment).

    Returns:

    • observation (object): Observation of the initial state. This will be an element of :attr:observation_space (typically a numpy array) and is analogous to the observation returned by :meth:step.

    • info (dictionary): This dictionary contains auxiliary information complementing observation. It should be analogous to the info returned by :meth:step.

    Parameters

    • seed \u2014 Optional[int] \u2014 defaults to None
    • options \u2014 Optional[dict] \u2014 defaults to None

    step

    Run one timestep of the environment's dynamics.

    When the end of an episode is reached, you are responsible for calling :meth:reset to reset this environment's state. Accepts an action and returns a tuple (observation, reward, terminated, truncated, info).

    Args:

    • action (ActType): an action provided by the agent.

    Returns:

    • observation (object): an element of the environment's :attr:observation_space. This may, for instance, be a numpy array containing the positions and velocities of certain objects.

    • reward (float): the amount of reward returned as a result of taking the action.

    • terminated (bool): whether a terminal state (as defined under the MDP of the task) is reached. In this case further step() calls could return undefined results.

    • truncated (bool): whether a truncation condition outside the scope of the MDP is satisfied. Typically a time limit, but could also be used to indicate the agent physically going out of bounds. Can be used to end the episode prematurely before a terminal state is reached.

    • info (dictionary): contains auxiliary diagnostic information (helpful for debugging, learning, and logging). This might, for instance, contain: metrics that describe the agent's performance state, variables that are hidden from observations, or individual reward terms that are combined to produce the total reward. It can also contain information that distinguishes truncation and termination; however this is deprecated in favour of returning two booleans, and will be removed in a future version.

    • (deprecated) done (bool): A boolean value for whether the episode has ended, in which case further :meth:step calls will return undefined results. A done signal may be emitted for different reasons: maybe the task underlying the environment was solved successfully, a certain time limit was exceeded, or the physics simulation has entered an invalid state.

    Parameters

    • arm
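    As a usage sketch mirroring the CandyCaneContest example above, and assuming this environment is registered under the id 'river_bandits/KArmedTestbed-v0':

    import gym

    from river import stats

    env = gym.make('river_bandits/KArmedTestbed-v0')
    _ = env.reset(seed=42)
    _ = env.action_space.seed(123)

    total = stats.Sum()
    while True:
        arm = env.action_space.sample()  # pick a random arm
        observation, reward, terminated, truncated, info = env.step(arm)
        total = total.update(reward)     # accumulate the rewards
        if terminated or truncated:
            break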

    "},{"location":"api/base/Base/","title":"Base","text":"

    Base class that is inherited by the majority of classes in River.

    This base class allows us to handle the following tasks in a uniform manner:

    • Getting and setting parameters

    • Displaying information

    • Mutating/cloning

    "},{"location":"api/base/Base/#methods","title":"Methods","text":"clone

    Return a fresh estimator with the same parameters.

    The clone has the same parameters but has not been updated with any data. This works by looking at the parameters from the class signature. Each parameter is either recursively cloned (if it's a class) or deep-copied via copy.deepcopy (if not). If the calling object is stochastic (i.e. it accepts a seed parameter) and has not been seeded, then the clone will not be idempotent. Indeed, this method's purpose is simply to return a new instance with the same input parameters.

    Parameters

    • new_params \u2014 'dict | None' \u2014 defaults to None
    • include_attributes \u2014 defaults to False

    mutate

    Modify attributes.

    This changes parameters in place. Although you can change attributes yourself, this is the recommended way to proceed. By default, all attributes are immutable, meaning they shouldn't be mutated. Calling mutate on an immutable attribute raises a ValueError. Mutable attributes are specified via the _mutable_attributes property, on a per-estimator basis.

    Parameters

    • new_attrs \u2014 'dict'
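    For illustration, a small sketch of clone using linear_model.LinearRegression; mutate is the in-place counterpart, restricted to the attributes listed in an estimator's _mutable_attributes:

    from river import linear_model

    model = linear_model.LinearRegression(l2=0.1)

    # clone returns a fresh, unfitted instance; new_params overrides parameters
    fresh = model.clone({"l2": 0.05})
    fresh.l2
    0.05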

    "},{"location":"api/base/BinaryDriftAndWarningDetector/","title":"BinaryDriftAndWarningDetector","text":"

    A binary drift detector that is also capable of issuing warnings.

    "},{"location":"api/base/BinaryDriftAndWarningDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/base/BinaryDriftAndWarningDetector/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    "},{"location":"api/base/BinaryDriftDetector/","title":"BinaryDriftDetector","text":"

    A drift detector for binary data.

    "},{"location":"api/base/BinaryDriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/base/BinaryDriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self
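    For illustration, a minimal sketch using drift.binary.DDM on a made-up stream of classification errors (True meaning a misclassification):

    from river import drift

    detector = drift.binary.DDM()

    # 1000 correct predictions followed by a burst of errors
    for i, x in enumerate([False] * 1000 + [True] * 300):
        detector = detector.update(x)
        if detector.drift_detected:
            print(f"Change detected at index {i}")
            break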

    "},{"location":"api/base/Classifier/","title":"Classifier","text":"

    A classifier.

    "},{"location":"api/base/Classifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.
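    For illustration, the canonical test-then-train loop for a classifier, sketched with linear_model.LogisticRegression and the Phishing dataset:

    from river import datasets, linear_model, metrics

    model = linear_model.LogisticRegression()
    metric = metrics.Accuracy()

    for x, y in datasets.Phishing():
        y_pred = model.predict_one(x)      # predict before learning (test-then-train)
        metric = metric.update(y, y_pred)
        model = model.learn_one(x, y)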

    "},{"location":"api/base/Clusterer/","title":"Clusterer","text":"

    A clustering model.

    "},{"location":"api/base/Clusterer/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    "},{"location":"api/base/DriftAndWarningDetector/","title":"DriftAndWarningDetector","text":"

    A drift detector that is also capable of issuing warnings.

    "},{"location":"api/base/DriftAndWarningDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/base/DriftAndWarningDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self

    "},{"location":"api/base/DriftDetector/","title":"DriftDetector","text":"

    A drift detector.

    "},{"location":"api/base/DriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/base/DriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self
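    For illustration, a minimal sketch using drift.ADWIN on a made-up stream whose mean shifts abruptly:

    from river import drift

    detector = drift.ADWIN()

    for i, x in enumerate([1.0] * 1000 + [5.0] * 1000):
        detector = detector.update(x)
        if detector.drift_detected:
            print(f"Drift detected at index {i}")
            break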

    "},{"location":"api/base/Ensemble/","title":"Ensemble","text":"

    An ensemble is a model which is composed of a list of models.

    "},{"location":"api/base/Ensemble/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[Estimator]

    "},{"location":"api/base/Ensemble/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/base/Ensemble/#methods","title":"Methods","text":"append

    S.append(value) -- append value to the end of the sequence

    Parameters

    • item

    clear

    S.clear() -> None -- remove all items from S

    copy count

    S.count(value) -> integer -- return number of occurrences of value

    Parameters

    • item

    extend

    S.extend(iterable) -- extend sequence by appending elements from the iterable

    Parameters

    • other

    index

    S.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present.

    Supporting start and stop arguments is optional, but recommended.

    Parameters

    • item
    • args

    insert

    S.insert(index, value) -- insert value before index

    Parameters

    • i
    • item

    pop

    S.pop([index]) -> item -- remove and return item at index (default last). Raise IndexError if list is empty or index is out of range.

    Parameters

    • i \u2014 defaults to -1

    remove

    S.remove(value) -- remove first occurrence of value. Raise ValueError if the value is not present.

    Parameters

    • item

    reverse

    S.reverse() -- reverse IN PLACE

    sort"},{"location":"api/base/Estimator/","title":"Estimator","text":"

    An estimator.

    "},{"location":"api/base/Estimator/#methods","title":"Methods","text":""},{"location":"api/base/MiniBatchClassifier/","title":"MiniBatchClassifier","text":"

    A classifier that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchClassifier/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.
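    For illustration, a sketch of the mini-batch API with linear_model.LogisticRegression, which supports it, on a made-up DataFrame:

    import pandas as pd

    from river import linear_model

    X = pd.DataFrame({"x1": [0.1, 0.4, 0.8, 0.9], "x2": [1.0, 0.2, 0.7, 0.3]})
    y = pd.Series([True, False, True, False])

    model = linear_model.LogisticRegression()
    model = model.learn_many(X, y)   # one pass over the mini-batch
    model.predict_many(X)            # a pd.Series of predicted labels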

    "},{"location":"api/base/MiniBatchRegressor/","title":"MiniBatchRegressor","text":"

    A regressor that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchRegressor/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and real-valued targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchRegressor: self

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted outcomes.

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
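    For illustration, the same pattern for regression, sketched with linear_model.LinearRegression on made-up data:

    import pandas as pd

    from river import linear_model

    X = pd.DataFrame({"x1": [1.0, 2.0, 3.0], "x2": [0.5, 0.1, 0.9]})
    y = pd.Series([2.0, 4.0, 6.0])

    model = linear_model.LinearRegression()
    model = model.learn_many(X, y)
    model.predict_many(X)   # a pd.Series of predicted values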

    "},{"location":"api/base/MiniBatchSupervisedTransformer/","title":"MiniBatchSupervisedTransformer","text":"

    A supervised transformer that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchSupervisedTransformer/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchSupervisedTransformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/base/MiniBatchTransformer/","title":"MiniBatchTransformer","text":"

    A transform that can operate on mini-batches.

    "},{"location":"api/base/MiniBatchTransformer/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    Many transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do update some state during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
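    For illustration, a sketch with preprocessing.StandardScaler, which supports mini-batches, on a made-up DataFrame:

    import pandas as pd

    from river import preprocessing

    X = pd.DataFrame({"x1": [1.0, 2.0, 3.0], "x2": [10.0, 20.0, 30.0]})

    scaler = preprocessing.StandardScaler()
    scaler = scaler.learn_many(X)   # update running means and variances
    scaler.transform_many(X)        # a new, standardised DataFrame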

    "},{"location":"api/base/MultiLabelClassifier/","title":"MultiLabelClassifier","text":"

    Multi-label classifier.

    "},{"location":"api/base/MultiLabelClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, bool]'

    Returns

    MultiLabelClassifier: self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing, given a dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, dict[bool, float]]: A dictionary that associates a probability with each label.
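    For illustration, a sketch with multioutput.ClassifierChain wrapping a logistic regression; the feature and label names are made up:

    from river import linear_model, multioutput

    model = multioutput.ClassifierChain(model=linear_model.LogisticRegression())

    x = {"x1": 0.2, "x2": 0.8}
    y = {"label_a": True, "label_b": False}

    model = model.learn_one(x, y)
    model.predict_one(x)   # a dict mapping each label to a boolean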

    "},{"location":"api/base/MultiTargetRegressor/","title":"MultiTargetRegressor","text":"

    Multi-target regressor.

    "},{"location":"api/base/MultiTargetRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, RegTarget]'
    • kwargs

    Returns

    MultiTargetRegressor: self

    predict_one

    Predict the outputs of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[FeatureName, RegTarget]: The predictions.
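    For illustration, a sketch with multioutput.RegressorChain wrapping a linear regression; the feature and target names are made up:

    from river import linear_model, multioutput

    model = multioutput.RegressorChain(model=linear_model.LinearRegression())

    x = {"x1": 1.0, "x2": 2.0}
    y = {"y1": 3.0, "y2": 5.0}

    model = model.learn_one(x, y)
    model.predict_one(x)   # a dict mapping each target to a prediction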

    "},{"location":"api/base/Regressor/","title":"Regressor","text":"

    A regressor.

    "},{"location":"api/base/Regressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
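    For illustration, a minimal sketch with linear_model.LinearRegression on a made-up observation:

    from river import linear_model

    model = linear_model.LinearRegression()

    x = {"x1": 1.0, "x2": 2.0}
    for _ in range(10):
        model = model.learn_one(x, 3.0)   # repeatedly fit the same observation

    model.predict_one(x)   # should move towards 3.0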

    "},{"location":"api/base/SupervisedTransformer/","title":"SupervisedTransformer","text":"

    A supervised transformer.

    "},{"location":"api/base/SupervisedTransformer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
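    For illustration, feature_selection.SelectKBest is one such supervised transformer; a sketch on made-up data:

    from river import feature_selection, stats

    selector = feature_selection.SelectKBest(similarity=stats.PearsonCorr(), k=1)

    # x1 correlates positively with y, x2 negatively
    for x, y in [
        ({"x1": 1.0, "x2": -1.0}, 1.0),
        ({"x1": 2.0, "x2": -3.0}, 2.0),
        ({"x1": 3.0, "x2": -2.0}, 3.0),
    ]:
        selector = selector.learn_one(x, y)

    selector.transform_one({"x1": 4.0, "x2": -4.0})   # keeps the single best feature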

    "},{"location":"api/base/Transformer/","title":"Transformer","text":"

    A transformer.

    "},{"location":"api/base/Transformer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this method is to do nothing. Transformers that do update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
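    For illustration, preprocessing.StandardScaler is a stateful transformer; a minimal sketch:

    from river import preprocessing

    scaler = preprocessing.StandardScaler()

    for x in [{"x": 1.0}, {"x": 2.0}, {"x": 3.0}]:
        scaler = scaler.learn_one(x)   # update the running mean and variance

    scaler.transform_one({"x": 2.0})   # standardised value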

    "},{"location":"api/base/Wrapper/","title":"Wrapper","text":"

    A wrapper model.

    "},{"location":"api/base/WrapperEnsemble/","title":"WrapperEnsemble","text":"

    A wrapper ensemble is an ensemble composed of multiple copies of the same model.

    "},{"location":"api/base/WrapperEnsemble/#parameters","title":"Parameters","text":"
    • model

      The model to copy.

    • n_models

      The number of copies to make.

    • seed

      Random number generator seed for reproducibility.

    "},{"location":"api/base/WrapperEnsemble/#attributes","title":"Attributes","text":"
    • models
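    For illustration, ensemble.BaggingClassifier is one such wrapper ensemble; a minimal sketch:

    from river import ensemble, linear_model

    model = ensemble.BaggingClassifier(
        model=linear_model.LogisticRegression(),
        n_models=5,
        seed=42,
    )

    len(model)   # the ensemble holds five copies of the base model
    5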
    "},{"location":"api/base/WrapperEnsemble/#methods","title":"Methods","text":""},{"location":"api/cluster/CluStream/","title":"CluStream","text":"

    CluStream

    The CluStream algorithm 1 maintains statistical information about the data using micro-clusters. These micro-clusters are temporal extensions of cluster feature vectors. The micro-clusters are stored at snapshots in time following a pyramidal pattern. This pattern makes it possible to recall summary statistics from different time horizons.

    Training with a new point p is performed in two main tasks:

    • Determine the micro-cluster closest to p.

    • Check whether p fits (memory-wise) into that micro-cluster:

      • if p fits, add it to the micro-cluster;

      • if p does not fit, free some space to insert a new micro-cluster.

      This is done in one of two ways: delete an old micro-cluster, or merge the two micro-clusters closest to each other.

    This implementation is an improved version of the original algorithm. Instead of maintaining the traditional cluster feature vector (the number of observations, the linear sum, and the sum of squares of data points and time stamps), this implementation uses Welford's algorithm 2 to calculate the variance incrementally, through the stats.Var statistic available within River.

    Since River does not support an actual \"off-line\" phase of the clustering algorithm (as data points are assumed to arrive continuously, one at a time), a time_gap parameter is introduced. After each time_gap, an incremental K-Means clustering algorithm will be initialized and applied on currently available micro-clusters to form the final solution, i.e. macro-clusters.

    "},{"location":"api/cluster/CluStream/#parameters","title":"Parameters","text":"
    • n_macro_clusters

      Type \u2192 int

      Default \u2192 5

      The number of clusters (k) for the k-means algorithm.

    • max_micro_clusters

      Type \u2192 int

      Default \u2192 100

      The maximum number of micro-clusters to use.

    • micro_cluster_r_factor

      Type \u2192 int

      Default \u2192 2

      Multiplier for the micro-cluster radius. When deciding to add a new data point to a micro-cluster, the maximum boundary is defined as a factor of the micro_cluster_r_factor of the RMS deviation of the data points in the micro-cluster from the centroid.

    • time_window

      Type \u2192 int

      Default \u2192 1000

      If the current time is T and the time window is h, we only consider the data that arrived within the period (T-h,T).

    • time_gap

      Type \u2192 int

      Default \u2192 100

      An incremental k-means is applied on the current set of micro-clusters after each time_gap to form the final macro-cluster solution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for generating initial centroid positions.

    • kwargs

      Other parameters passed to the incremental kmeans at cluster.KMeans.

    "},{"location":"api/cluster/CluStream/#attributes","title":"Attributes","text":"
    • centers (dict)

      Central positions of each cluster.

    "},{"location":"api/cluster/CluStream/#examples","title":"Examples","text":"

    In the following example, max_micro_clusters is set relatively low due to the limited number of training points. Moreover, all points are learnt before any predictions are made. The halflife is set at 0.4, to show that you can pass cluster.KMeans parameters via keyword arguments.

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [-4, 2],\n    [-4, 4],\n    [-4, 0],\n    [5, 0],\n    [5, 2],\n    [5, 4]\n]\n\nclustream = cluster.CluStream(\n    n_macro_clusters=3,\n    max_micro_clusters=5,\n    time_gap=3,\n    seed=0,\n    halflife=0.4\n)\n\nfor x, _ in stream.iter_array(X):\n    clustream = clustream.learn_one(x)\n\nclustream.predict_one({0: 1, 1: 1})\n
    1\n

    clustream.predict_one({0: -4, 1: 3})\n
    2\n

    clustream.predict_one({0: 4, 1: 3.5})\n
    0\n

    "},{"location":"api/cluster/CluStream/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • w \u2014 defaults to 1.0

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    1. Aggarwal, C.C., Philip, S.Y., Han, J. and Wang, J., 2003, A framework for clustering evolving data streams. In Proceedings 2003 VLDB conference (pp. 81-92). Morgan Kaufmann.\u00a0\u21a9

    2. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1982. Updating formulae and a pairwise algorithm for computing sample variances. In COMPSTAT 1982 5th Symposium held at Toulouse 1982 (pp. 30-41). Physica, Heidelberg. https://doi.org/10.1007/978-3-642-51461-6_3.\u00a0\u21a9

    "},{"location":"api/cluster/DBSTREAM/","title":"DBSTREAM","text":"

    DBSTREAM

    DBSTREAM 1 is a clustering algorithm for evolving data streams. It is the first micro-cluster-based online clustering component that explicitly captures the density between micro-clusters via a shared density graph. The density information in the graph is then exploited for reclustering based on the actual density between adjacent micro-clusters.

    The algorithm is divided into two parts:

    Online micro-cluster maintenance (learning)

    For a new point p:

    • Find all micro clusters for which p falls within the fixed radius (clustering threshold).

    • If no neighbor is found, a new micro cluster with a weight of 1 is created for p. If one or more neighbors of p are found, we update the micro clusters by applying the appropriate fading and increasing their weight, and then we try to move them closer to p using the Gaussian neighborhood function.

    • Next, the shared density graph is updated. To prevent collapsing micro clusters, we will restrict the movement for micro clusters in case they come closer than \\(r\\) (clustering threshold) to each other. Finishing this process, the time stamp is also increased by 1.

    • Finally, the cleanup will be processed. It is executed every t_gap time steps, removing weak micro clusters and weak entries in the shared density graph to recover memory and improve the clustering algorithm's processing speed.

    Offline generation of macro clusters (clustering)

    The offline generation of macro clusters is generated through the two following steps:

    • The connectivity graph C is constructed using shared density entries between strong micro clusters. The edges in this connectivity graph with a connectivity value greater than the intersection threshold (\\(\\alpha\\)) are used to find connected components representing the final cluster.

    • After the connectivity graph is generated, a variant of the DBSCAN algorithm proposed by Ester et al. is applied to form all macro clusters from \\(\\alpha\\)-connected micro clusters.

    "},{"location":"api/cluster/DBSTREAM/#parameters","title":"Parameters","text":"
    • clustering_threshold

      Type \u2192 float

      Default \u2192 1.0

      DBStream represents each micro cluster by a leader (a data point defining the micro cluster's center) and the density in an area of a user-specified radius \\(r\\) (clustering_threshold) around the center.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.01

      Parameter that controls the importance of historical data to current cluster. Note that fading_factor has to be different from 0.

    • cleanup_interval

      Type \u2192 float

      Default \u2192 2

      The time interval between two consecutive time points when the cleanup process is conducted.

    • intersection_factor

      Type \u2192 float

      Default \u2192 0.3

      The intersection factor related to the area of the overlap of the micro clusters relative to the area cover by micro clusters. This parameter is used to determine whether a micro cluster or a shared density is weak.

    • minimum_weight

      Type \u2192 float

      Default \u2192 1.0

      The minimum weight for a cluster not to be considered \"noisy\".

    "},{"location":"api/cluster/DBSTREAM/#attributes","title":"Attributes","text":"
    • n_clusters

      Number of clusters generated by the algorithm.

    • clusters

      A set of final clusters of type DBStreamMicroCluster. However, these are either micro clusters, or macro clusters that are generated by merging all \\(\\alpha\\)-connected micro clusters. This set is generated through the offline phase of the algorithm.

    • centers

      Final clusters' centers.

    • micro_clusters

      Micro clusters generated by the algorithm. Instead of directly assigning new instance points to the nearest micro cluster, at each iteration the weight and center are modified so that the clusters move closer to the new points, using the Gaussian neighborhood function.

    "},{"location":"api/cluster/DBSTREAM/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 0.5], [1, 0.625], [1, 0.75], [1, 1.125], [1, 1.5], [1, 1.75],\n    [4, 1.5], [4, 2.25], [4, 2.5], [4, 3], [4, 3.25], [4, 3.5]\n]\n\ndbstream = cluster.DBSTREAM(\n    clustering_threshold=1.5,\n    fading_factor=0.05,\n    cleanup_interval=4,\n    intersection_factor=0.5,\n    minimum_weight=1\n)\n\nfor x, _ in stream.iter_array(X):\n    dbstream = dbstream.learn_one(x)\n\ndbstream.predict_one({0: 1, 1: 2})\n
    0\n

    dbstream.predict_one({0: 5, 1: 2})\n
    1\n

    dbstream._n_clusters\n
    2\n

    "},{"location":"api/cluster/DBSTREAM/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. Michael Hahsler and Matthew Bolanos (2016). Clustering Data Streams Based on Shared Density between Micro-Clusters. IEEE Transactions on Knowledge and Data Engineering 28(6), pp 1449-1461.\u00a0\u21a9

    2. Ester et al (1996). A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In KDD-96 Proceedings, AAAI.\u00a0\u21a9

    "},{"location":"api/cluster/DenStream/","title":"DenStream","text":"

    DenStream

    DenStream 1 is a clustering algorithm for evolving data streams. DenStream can discover clusters with arbitrary shape and is robust against noise (outliers).

    \"Dense\" micro-clusters (named core-micro-clusters) summarise the clusters of arbitrary shape. A pruning strategy based on the concepts of potential and outlier micro-clusters guarantees the precision of the weights of the micro-clusters with limited memory.

    The algorithm is divided into two parts:

    Online micro-cluster maintenance (learning)

    For a new point p:

    • Try to merge p into either the nearest p-micro-cluster (potential), o-micro-cluster (outlier), or create a new o-micro-cluster and insert it into the outlier buffer.

    • Every T_p iterations, consider the weights of all potential and outlier micro-clusters. If a micro-cluster's weight is smaller than a certain threshold (different for each type of micro-cluster), the micro-cluster is deleted.

    Offline generation of clusters on-demand (clustering)

    A variant of the DBSCAN algorithm 2 is used, such that all density-connected p-micro-clusters determine the final clusters. Moreover, in order for the algorithm to always be able to generate clusters, a certain number of points must be passed through the algorithm with a suitable streaming speed (number of points passed through within a unit time), indicated by n_samples_init and stream_speed.

    "},{"location":"api/cluster/DenStream/#parameters","title":"Parameters","text":"
    • decaying_factor

      Type \u2192 float

      Default \u2192 0.25

      Parameter that controls the importance of historical data to current cluster. Note that decaying_factor has to be different from 0.

    • beta

      Type \u2192 float

      Default \u2192 0.75

      Parameter to determine the threshold of outliers relative to core micro-clusters. The value of beta must be within the range (0,1].

    • mu

      Type \u2192 float

      Default \u2192 2

      Parameter to determine the threshold of outliers relative to core micro-cluster. As beta * mu must be greater than 1, mu must be within the range (1/beta, inf).

    • epsilon

      Type \u2192 float

      Default \u2192 0.02

      Defines the epsilon neighborhood

    • n_samples_init

      Type \u2192 int

      Default \u2192 1000

      Number of points used to initialize the online process

    • stream_speed

      Type \u2192 int

      Default \u2192 100

      Number of points that arrive in one unit of time

    "},{"location":"api/cluster/DenStream/#attributes","title":"Attributes","text":"
    • n_clusters

      Number of clusters generated by the algorithm.

    • clusters

      A set of final clusters of type MicroCluster, which means that these cluster include all the required information, including number of points, creation time, weight, (weighted) linear sum, (weighted) square sum, center and radius.

    • p_micro_clusters

      The potential core-micro-clusters generated by the algorithm. When a cluster generation request arrives, these p-micro-clusters go through a variant of the DBSCAN algorithm to determine the final clusters.

    • o_micro_clusters

      The outlier micro-clusters.

    "},{"location":"api/cluster/DenStream/#examples","title":"Examples","text":"

    The following example uses the default parameters of the algorithm to test its functionality. The set of evolving points X are designed so that clusters are easily identifiable.

    from river import cluster\nfrom river import stream\n\nX = [\n    [-1, -0.5], [-1, -0.625], [-1, -0.75], [-1, -1], [-1, -1.125],\n    [-1, -1.25], [-1.5, -0.5], [-1.5, -0.625], [-1.5, -0.75], [-1.5, -1],\n    [-1.5, -1.125], [-1.5, -1.25], [1, 1.5], [1, 1.75], [1, 2],\n    [4, 1.25], [4, 1.5], [4, 2.25], [4, 2.5], [4, 3],\n    [4, 3.25], [4, 3.5], [4, 3.75], [4, 4],\n]\n\ndenstream = cluster.DenStream(decaying_factor=0.01,\n                              beta=0.5,\n                              mu=2.5,\n                              epsilon=0.5,\n                              n_samples_init=10)\n\nfor x, _ in stream.iter_array(X):\n    denstream = denstream.learn_one(x)\n\ndenstream.predict_one({0: -1, 1: -2})\n
    0\n

    denstream.predict_one({0: 5, 1: 4})\n
    1\n

    denstream.predict_one({0: 1, 1: 1})\n
    0\n

    denstream.n_clusters\n
    2\n

    "},{"location":"api/cluster/DenStream/#methods","title":"Methods","text":"BufferItem learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. Feng et al (2006, pp 328-339). Density-Based Clustering over an Evolving Data Stream with Noise. In Proceedings of the Sixth SIAM International Conference on Data Mining, April 20\u201322, 2006, Bethesda, MD, USA.\u00a0\u21a9

    2. Ester et al (1996). A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In KDD-96 Proceedings, AAAI.\u00a0\u21a9

    "},{"location":"api/cluster/KMeans/","title":"KMeans","text":"

    Incremental k-means.

    The most common way to implement batch k-means is to use Lloyd's algorithm, which consists of assigning all the data points to a set of cluster centers and then moving the centers accordingly. This requires multiple passes over the data and thus isn't applicable in a streaming setting.

    In this implementation we start by finding the cluster that is closest to the current observation. We then move the cluster's central position towards the new observation. The halflife parameter determines by how much to move the cluster toward the new observation. You will get better results if you scale your data appropriately.

    "},{"location":"api/cluster/KMeans/#parameters","title":"Parameters","text":"
    • n_clusters

      Default \u2192 5

      Maximum number of clusters to assign.

    • halflife

      Default \u2192 0.5

      Amount by which to move the cluster centers; a reasonable value is between 0 and 1.

    • mu

      Default \u2192 0

      Mean of the normal distribution used to instantiate cluster positions.

    • sigma

      Default \u2192 1

      Standard deviation of the normal distribution used to instantiate cluster positions.

    • p

      Default \u2192 2

      Power parameter for the Minkowski metric. When p=1, this corresponds to the Manhattan distance, while p=2 corresponds to the Euclidean distance.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for generating initial centroid positions.

    "},{"location":"api/cluster/KMeans/#attributes","title":"Attributes","text":"
    • centers (dict)

      Central positions of each cluster.

    "},{"location":"api/cluster/KMeans/#examples","title":"Examples","text":"

    In the following example the cluster assignments are exactly the same as when using sklearn's batch implementation. However changing the halflife parameter will produce different outputs.

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [-4, 2],\n    [-4, 4],\n    [-4, 0]\n]\n\nk_means = cluster.KMeans(n_clusters=2, halflife=0.1, sigma=3, seed=42)\n\nfor i, (x, _) in enumerate(stream.iter_array(X)):\n    k_means = k_means.learn_one(x)\n    print(f'{X[i]} is assigned to cluster {k_means.predict_one(x)}')\n
    [1, 2] is assigned to cluster 1\n[1, 4] is assigned to cluster 1\n[1, 0] is assigned to cluster 0\n[-4, 2] is assigned to cluster 1\n[-4, 4] is assigned to cluster 1\n[-4, 0] is assigned to cluster 0\n

    k_means.predict_one({0: 0, 1: 0})\n
    0\n

    k_means.predict_one({0: 4, 1: 4})\n
    1\n

    "},{"location":"api/cluster/KMeans/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    Clusterer: self

    learn_predict_one

    Equivalent to k_means.learn_one(x).predict_one(x), but faster.

    Parameters

    • x

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    int: A cluster number.

    1. Sequential k-Means Clustering \u21a9

    2. Sculley, D., 2010, April. Web-scale k-means clustering. In Proceedings of the 19th international conference on World wide web (pp. 1177-1178). \u21a9

    "},{"location":"api/cluster/STREAMKMeans/","title":"STREAMKMeans","text":"

    STREAMKMeans

    STREAMKMeans is an alternative version of the original STREAMLSEARCH algorithm proposed by O'Callaghan et al. 1, obtained by replacing the LSEARCH-based k-medians with the k-means algorithm.

    However, instead of using traditional k-means, which requires a complete reclustering each time the temporary chunk of data points is full, this implementation uses an incremental k-means.

    At first, the cluster centers are initialized with a KMeans instance. For a new point p:

    • If the size of the chunk is less than the maximum size allowed, add the new point to the temporary chunk.

    • When the size of the chunk reaches the maximum size allowed:

      • A new incremental KMeans instance is created and processes all points in the temporary chunk. The centers of this new instance then become the new centers.

      • All points are deleted from the temporary chunk so that new points can be added.

    • When a prediction request arrives, the centers of the algorithm are exactly the same as the centers of the original KMeans at the time of retrieval.

    "},{"location":"api/cluster/STREAMKMeans/#parameters","title":"Parameters","text":"
    • chunk_size

      Default \u2192 10

      Maximum size allowed for the temporary data chunk.

    • n_clusters

      Default \u2192 2

      Number of clusters generated by the algorithm.

    • kwargs

      Other parameters passed to the incremental kmeans at cluster.KMeans.

    "},{"location":"api/cluster/STREAMKMeans/#attributes","title":"Attributes","text":"
    • centers

      Cluster centers generated from running the incremental KMeans algorithm through centers of each chunk.

    "},{"location":"api/cluster/STREAMKMeans/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\n\nX = [\n    [1, 0.5], [1, 0.625], [1, 0.75], [1, 1.125], [1, 1.5], [1, 1.75],\n    [4, 1.5], [4, 2.25], [4, 2.5], [4, 3], [4, 3.25], [4, 3.5]\n]\n\nstreamkmeans = cluster.STREAMKMeans(chunk_size=3, n_clusters=2, halflife=0.5, sigma=1.5, seed=0)\n\nfor x, _ in stream.iter_array(X):\n    streamkmeans = streamkmeans.learn_one(x)\n\nstreamkmeans.predict_one({0: 1, 1: 0})\n
    0\n

    streamkmeans.predict_one({0: 5, 1: 2})\n
    1\n

    "},{"location":"api/cluster/STREAMKMeans/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None

    Returns

    int: A cluster number.

    1. O'Callaghan et al. (2002). Streaming-data algorithms for high-quality clustering. In Proceedings 18th International Conference on Data Engineering, Feb 26 - March 1, San Jose, CA, USA. DOI: 10.1109/ICDE.2002.994785.\u00a0\u21a9

    "},{"location":"api/cluster/TextClust/","title":"TextClust","text":"

    textClust, a clustering algorithm for text data.

    textClust 12 is a stream clustering algorithm for textual data that can identify and track topics over time in a stream of texts. The algorithm uses a widely popular two-phase clustering approach where the stream is first summarised in real time.

    The result is many small preliminary clusters in the stream called micro-clusters. Micro-clusters maintain enough information to update and efficiently calculate the cosine similarity between them over time, based on the TF-IDF vector of their texts. Upon request, the micro-clusters can be reclustered to generate the final result using any distance-based clustering algorithm, such as hierarchical clustering. To keep the micro-clusters up-to-date, the algorithm applies a fading strategy where micro-clusters that are not updated regularly lose relevance and are eventually removed.

    "},{"location":"api/cluster/TextClust/#parameters","title":"Parameters","text":"
    • radius

      Default \u2192 0.3

      Distance threshold to merge two micro-clusters. Must be within the range (0, 1]

    • fading_factor

      Default \u2192 0.0005

      Fading factor of micro-clusters

    • tgap

      Default \u2192 100

      Time between outlier removal

    • term_fading

      Default \u2192 True

      Determines whether individual terms should also be faded

    • real_time_fading

      Default \u2192 True

      Parameter that specifies whether natural time or the number of observations should be used for fading

    • micro_distance

      Default \u2192 tfidf_cosine_distance

      Distance metric used for clustering micro-clusters

    • macro_distance

      Default \u2192 tfidf_cosine_distance

      Distance metric used for clustering macro-clusters

    • num_macro

      Default \u2192 3

      Number of macro clusters that should be identified during the reclustering phase

    • min_weight

      Default \u2192 0

      Minimum weight of micro clusters to be used for reclustering

    • auto_r

      Default \u2192 False

      Parameter that specifies whether the radius should be automatically updated

    • auto_merge

      Default \u2192 True

      Determines whether close observations should be merged together

    • sigma

      Default \u2192 1

      Parameter that influences the automated threshold adaptation technique

    "},{"location":"api/cluster/TextClust/#attributes","title":"Attributes","text":"
    • micro_clusters

      Micro-clusters generated by the algorithm. Micro-clusters are of type textclust.microcluster

    "},{"location":"api/cluster/TextClust/#examples","title":"Examples","text":"

    from river import compose\nfrom river import feature_extraction\nfrom river import metrics\nfrom river import cluster\n\ncorpus = [\n   {\"text\":'This is the first document.',\"idd\":1, \"cluster\": 1},\n   {\"text\":'This document is the second document.',\"idd\":2,\"cluster\": 1},\n   {\"text\":'And this is super unrelated.',\"idd\":3,\"cluster\": 2},\n   {\"text\":'Is this the first document?',\"idd\":4,\"cluster\": 1},\n   {\"text\":'This is super unrelated as well',\"idd\":5,\"cluster\": 2},\n   {\"text\":'Test text',\"idd\":6,\"cluster\": 5}\n]\n\nstopwords = [ 'stop', 'the', 'to', 'and', 'a', 'in', 'it', 'is', 'I']\n\nmetric = metrics.AdjustedRand()\n\nmodel = compose.Pipeline(\n    feature_extraction.BagOfWords(lowercase=True, ngram_range=(1, 2), stop_words=stopwords),\n    cluster.TextClust(real_time_fading=False, fading_factor=0.001, tgap=100, auto_r=True,\n    radius=0.9)\n)\n\nfor x in corpus:\n    y_pred = model.predict_one(x[\"text\"])\n    y = x[\"cluster\"]\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x[\"text\"])\n\nprint(metric)\n
    AdjustedRand: -0.17647058823529413\n

    "},{"location":"api/cluster/TextClust/#methods","title":"Methods","text":"distances get_assignment get_macroclusters learn_one

    Update the model with a set of features x.

    Parameters

    • x \u2014 'dict'
    • t \u2014 defaults to None
    • sample_weight \u2014 defaults to None

    Returns

    Clusterer: self

    microcluster predict_one

    Predicts the cluster number for a set of features x.

    Parameters

    • x \u2014 'dict'
    • sample_weight \u2014 defaults to None
    • type \u2014 defaults to micro

    Returns

    int: A cluster number.

    showclusters tfcontainer updateMacroClusters
    1. Assenmacher, D. und Trautmann, H. (2022). Textual One-Pass Stream Clustering with Automated Distance Threshold Adaption. In: Asian Conference on Intelligent Information and Database Systems (Accepted)\u00a0\u21a9

    2. Carnein, M., Assenmacher, D., Trautmann, H. (2017). Stream Clustering of Chat Messages with Applications to Twitch Streams. In: Advances in Conceptual Modeling. ER 2017.\u00a0\u21a9

    "},{"location":"api/compat/River2SKLClassifier/","title":"River2SKLClassifier","text":"

    Compatibility layer from River to scikit-learn for classification.

    "},{"location":"api/compat/River2SKLClassifier/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Classifier

    "},{"location":"api/compat/River2SKLClassifier/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y

    Returns

    self

    get_metadata_routing

    Get metadata routing of this object.

    Please check the scikit-learn User Guide on metadata routing for how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y
    • classes \u2014 defaults to None

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted target values for each row of X.

    predict_proba

    Predicts the target probability of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted target probabilities for each row of X.

    score

    Return the mean accuracy on the given test data and labels.

    In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.

    Parameters

    • X
    • y
    • sample_weight \u2014 defaults to None

    Returns

    float

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    set_partial_fit_request

    Request metadata passed to the partial_fit method.

    Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the scikit-learn User Guide on metadata routing for how the routing mechanism works. The options for each parameter are:

    • True: metadata is requested, and passed to partial_fit if provided. The request is ignored if metadata is not provided.
    • False: metadata is not requested and the meta-estimator will not pass it to partial_fit.
    • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
    • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

    The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. Added in scikit-learn version 1.3. Note that this method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. inside a pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • classes \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLClassifier: object

    set_score_request

    Request metadata passed to the score method.

    Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the scikit-learn User Guide on metadata routing for how the routing mechanism works. The options for each parameter are:

    • True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
    • False: metadata is not requested and the meta-estimator will not pass it to score.
    • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
    • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

    The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. Added in scikit-learn version 1.3. Note that this method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. inside a pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • sample_weight \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLClassifier: object

    "},{"location":"api/compat/River2SKLClusterer/","title":"River2SKLClusterer","text":"

    Compatibility layer from River to scikit-learn for clustering.

    "},{"location":"api/compat/River2SKLClusterer/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Clusterer
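
    A minimal sketch (with made-up random data) of clustering a batch with the wrapper:

    import numpy as np\nfrom river import cluster\nfrom river import compat\n\nnp.random.seed(42)\nX = np.random.rand(50, 2)\n\n# Wrap a River clusterer so it exposes fit/fit_predict/predict\nclu = compat.River2SKLClusterer(river_estimator=cluster.KMeans(n_clusters=3, seed=42))\nlabels = clu.fit_predict(X)\n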

    "},{"location":"api/compat/River2SKLClusterer/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    fit_predict

    Perform clustering on X and returns cluster labels.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    ndarray of shape (n_samples,), dtype=np.int64

    get_metadata_routing

    Get metadata routing of this object.

    Please check the scikit-learn User Guide on metadata routing for how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Predicted cluster labels for each row of X.

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    "},{"location":"api/compat/River2SKLRegressor/","title":"River2SKLRegressor","text":"

    Compatibility layer from River to scikit-learn for regression.

    "},{"location":"api/compat/River2SKLRegressor/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Regressor
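
    A minimal sketch (the dataset choice here is arbitrary) of fitting and predicting with the wrapper:

    from river import compat\nfrom river import linear_model\nfrom sklearn import datasets\n\nX, y = datasets.load_diabetes(return_X_y=True)\n\n# Wrap a River regressor so it exposes fit/partial_fit/predict\nreg = compat.River2SKLRegressor(river_estimator=linear_model.LinearRegression())\nreg = reg.partial_fit(X[:300], y[:300])\ny_pred = reg.predict(X[300:305])\n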

    "},{"location":"api/compat/River2SKLRegressor/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y

    Returns

    self

    get_metadata_routing

    Get metadata routing of this object.

    Please check the scikit-learn User Guide on metadata routing for how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y

    Returns

    self

    predict

    Predicts the target of an entire dataset contained in memory.

    Parameters

    • X

    Returns

    np.ndarray: Predicted target values for each row of X.

    score

    Return the coefficient of determination of the prediction.

    The coefficient of determination R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get an R^2 score of 0.0.

    Parameters

    • X
    • y
    • sample_weight \u2014 defaults to None

    Returns

    float

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    set_score_request

    Request metadata passed to the score method.

    Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config). Please see the scikit-learn User Guide on metadata routing for how the routing mechanism works. The options for each parameter are:

    • True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
    • False: metadata is not requested and the meta-estimator will not pass it to score.
    • None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
    • str: metadata should be passed to the meta-estimator with this given alias instead of the original name.

    The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others. Added in scikit-learn version 1.3. Note that this method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. inside a pipeline.Pipeline. Otherwise it has no effect.

    Parameters

    • sample_weight \u2014 Union[bool, NoneType, str] \u2014 defaults to $UNCHANGED$

    Returns

    River2SKLRegressor: object

    "},{"location":"api/compat/River2SKLTransformer/","title":"River2SKLTransformer","text":"

    Compatibility layer from River to scikit-learn for transformation.

    "},{"location":"api/compat/River2SKLTransformer/#parameters","title":"Parameters","text":"
    • river_estimator

      Type \u2192 base.Transformer
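
    A minimal sketch (with made-up random data) of scaling a batch with the wrapper:

    import numpy as np\nfrom river import compat\nfrom river import preprocessing\n\nnp.random.seed(42)\nX = np.random.rand(10, 3)\n\n# Wrap a River transformer so it exposes fit/partial_fit/transform\nscaler = compat.River2SKLTransformer(river_estimator=preprocessing.StandardScaler())\nX_scaled = scaler.fit_transform(X)\n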

    "},{"location":"api/compat/River2SKLTransformer/#methods","title":"Methods","text":"fit

    Fits to an entire dataset contained in memory.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    fit_transform

    Fit to data, then transform it.

    Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X.

    Parameters

    • X
    • y \u2014 defaults to None
    • fit_params

    Returns

    ndarray of shape (n_samples, n_features_new)

    get_metadata_routing

    Get metadata routing of this object.

    Please check the scikit-learn User Guide on metadata routing for how the routing mechanism works.

    Returns

    MetadataRequest

    get_params

    Get parameters for this estimator.

    Parameters

    • deep \u2014 defaults to True

    Returns

    dict

    partial_fit

    Fits incrementally on a portion of a dataset.

    Parameters

    • X
    • y \u2014 defaults to None

    Returns

    self

    set_output

    Set output container.

    See the scikit-learn documentation on the set_output API for an example of how to use it.

    Parameters

    • transform \u2014 defaults to None

    Returns

    estimator instance

    set_params

    Set the parameters of this estimator.

    The method works on simple estimators as well as on nested objects (such as sklearn.pipeline.Pipeline). The latter have parameters of the form <component>__<parameter> so that it's possible to update each component of a nested object.

    Parameters

    • params

    Returns

    estimator instance

    transform

    Transforms an entire dataset contained in memory.

    Parameters

    • X

    Returns

    Transformed output.

    "},{"location":"api/compat/SKL2RiverClassifier/","title":"SKL2RiverClassifier","text":"

    Compatibility layer from scikit-learn to River for classification.

    "},{"location":"api/compat/SKL2RiverClassifier/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.ClassifierMixin

      A scikit-learn classifier which has a partial_fit method.

    • classes

      Type \u2192 list

    "},{"location":"api/compat/SKL2RiverClassifier/#examples","title":"Examples","text":"

    from river import compat\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import linear_model\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_breast_cancer(),\n    shuffle=True,\n    seed=42\n)\n\nmodel = preprocessing.StandardScaler()\nmodel |= compat.convert_sklearn_to_river(\n    estimator=linear_model.SGDClassifier(\n        loss='log_loss',\n        eta0=0.01,\n        learning_rate='constant'\n    ),\n    classes=[False, True]\n)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.198029\n

    "},{"location":"api/compat/SKL2RiverClassifier/#methods","title":"Methods","text":"learn_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_many predict_one

    Predict the label of a set of features x.

    Parameters

    • x

    Returns

    The predicted label.

    predict_proba_many predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/compat/SKL2RiverRegressor/","title":"SKL2RiverRegressor","text":"

    Compatibility layer from scikit-learn to River for regression.

    "},{"location":"api/compat/SKL2RiverRegressor/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.BaseEstimator

      A scikit-learn regressor which has a partial_fit method.

    "},{"location":"api/compat/SKL2RiverRegressor/#examples","title":"Examples","text":"

    from river import compat\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import linear_model\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_diabetes(),\n    shuffle=True,\n    seed=42\n)\n\nscaler = preprocessing.StandardScaler()\nsgd_reg = compat.convert_sklearn_to_river(linear_model.SGDRegressor())\nmodel = scaler | sgd_reg\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 84.501421\n

    "},{"location":"api/compat/SKL2RiverRegressor/#methods","title":"Methods","text":"learn_many learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_many predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/compat/convert-river-to-sklearn/","title":"convert_river_to_sklearn","text":"

    Wraps a river estimator to make it compatible with scikit-learn.

    "},{"location":"api/compat/convert-river-to-sklearn/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 base.Estimator
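
    This is a convenience function on top of the River2SKL* wrappers documented above; the class of the returned object should match the type of the wrapped estimator. A minimal sketch:

    from river import compat\nfrom river import preprocessing\n\n# A River transformer should come back as a scikit-learn compatible transformer\nsk_scaler = compat.convert_river_to_sklearn(preprocessing.StandardScaler())\n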

    "},{"location":"api/compat/convert-sklearn-to-river/","title":"convert_sklearn_to_river","text":"

    Wraps a scikit-learn estimator to make it compatible with river.

    "},{"location":"api/compat/convert-sklearn-to-river/#parameters","title":"Parameters","text":"
    • estimator

      Type \u2192 sklearn_base.BaseEstimator

    • classes

      Type \u2192 list | None

      Default \u2192 None

      Class names necessary for classifiers.
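
    A minimal sketch, mirroring the SKL2RiverClassifier example above:

    from river import compat\nfrom sklearn import linear_model\n\n# classes is required when wrapping a classifier\nriver_clf = compat.convert_sklearn_to_river(\n    estimator=linear_model.SGDClassifier(loss='log_loss'),\n    classes=[False, True]\n)\n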

    "},{"location":"api/compose/Discard/","title":"Discard","text":"

    Removes features.

    This can be used in a pipeline when you want to remove certain features. The transform_one method is pure, and therefore returns a new dictionary instead of removing the specified keys from the input.

    "},{"location":"api/compose/Discard/#parameters","title":"Parameters","text":"
    • keys

      Type \u2192 tuple[base.typing.FeatureName]

      Key(s) to discard.

    "},{"location":"api/compose/Discard/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12, 'c': 13}\ncompose.Discard('a', 'b').transform_one(x)\n
    {'c': 13}\n

    You can chain a discarder with any estimator in order to apply said estimator to the desired features.

    from river import feature_extraction as fx\n\nx = {'sales': 10, 'shop': 'Ikea', 'country': 'Sweden'}\n\npipeline = (\n    compose.Discard('shop', 'country') |\n    fx.PolynomialExtender()\n)\npipeline.transform_one(x)\n
    {'sales': 10, 'sales*sales': 100}\n

    "},{"location":"api/compose/Discard/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/FuncTransformer/","title":"FuncTransformer","text":"

    Wraps a function to make it usable in a pipeline.

    There is often a need to apply an arbitrary transformation to a set of features. For instance, this could involve parsing a date and then extracting the hour from said date. If you're processing a stream of data, then you can do this yourself by calling the necessary code at your leisure. On the other hand, if you want to do this as part of a pipeline, then you need to follow a simple convention.

    To use a function as part of a pipeline, it must take a dict of features as input and output a dict. Once you have initialized this class with your function, you can use it like you would use any other (unsupervised) transformer.

    It is up to you whether you want your function to be pure or not. By pure, we mean a function that doesn't modify its input. We recommend writing pure functions because this reduces the chances of inserting bugs into your pipeline.

    "},{"location":"api/compose/FuncTransformer/#parameters","title":"Parameters","text":"
    • func

      Type \u2192 typing.Callable[[dict], dict]

      A function that takes as input a dict and outputs a dict.

    "},{"location":"api/compose/FuncTransformer/#examples","title":"Examples","text":"

    from pprint import pprint\nimport datetime as dt\nfrom river import compose\n\nx = {'date': '2019-02-14'}\n\ndef parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    x['is_weekend'] = date.weekday() in (5, 6)\n    x['hour'] = date.hour\n    return x\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'date': '2019-02-14', 'hour': 0, 'is_weekend': False}\n

    The above example is not pure because it modifies the input. The following example is pure and produces the same output:

    def parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    return {'is_weekend': date.weekday() in (5, 6), 'hour': date.hour}\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'hour': 0, 'is_weekend': False}\n

    The previous example doesn't include the date feature because it returns a new dict. However, a common use case is to add a feature to an existing set of features. You can do this in a pure way by unpacking the input dict into the output dict:

    def parse_date(x):\n    date = dt.datetime.strptime(x['date'], '%Y-%m-%d')\n    return {'is_weekend': date.weekday() in (5, 6), 'hour': date.hour, **x}\n\nt = compose.FuncTransformer(parse_date)\npprint(t.transform_one(x))\n
    {'date': '2019-02-14', 'hour': 0, 'is_weekend': False}\n

    You can add FuncTransformer to a pipeline just like you would with any other transformer.

    from river import naive_bayes\n\npipeline = compose.FuncTransformer(parse_date) | naive_bayes.MultinomialNB()\npipeline\n
    Pipeline (\n  FuncTransformer (\n    func=\"parse_date\"\n  ),\n  MultinomialNB (\n    alpha=1.\n  )\n)\n

    If you provide a function without wrapping it, then the pipeline will do it for you:

    pipeline = parse_date | naive_bayes.MultinomialNB()\n
    "},{"location":"api/compose/FuncTransformer/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Grouper/","title":"Grouper","text":"

    Applies a transformer within different groups.

    This transformer allows you to split your data into groups and apply a transformer within each group. This happens in a streaming manner, which means that the groups are discovered online. A separate copy of the provided transformer is made whenever a new group appears. The groups are defined according to one or more keys.

    "},{"location":"api/compose/Grouper/#parameters","title":"Parameters","text":"
    • transformer

      Type \u2192 base.Transformer

    • by

      Type \u2192 base.typing.FeatureName | list[base.typing.FeatureName]

      The field on which to group the data. This can either be a single value or a list of values.
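
    As a minimal sketch (the feature names are made up for illustration), here is how a separate StandardScaler could be maintained per group:

    from river import compose\nfrom river import preprocessing\n\n# A fresh copy of the StandardScaler is made for each distinct value of 'store_id'\ngrouper = compose.Grouper(\n    transformer=preprocessing.StandardScaler(),\n    by='store_id'\n)\n\nx = {'store_id': 1, 'sales': 10}\ngrouper = grouper.learn_one(x)\ngrouper.transform_one(x)\n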

    "},{"location":"api/compose/Grouper/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Pipeline/","title":"Pipeline","text":"

    A pipeline of estimators.

    Pipelines allow you to chain different steps into a sequence. Typically, when doing supervised learning, a pipeline contains one or more transformation steps, while the last step is a regressor or a classifier. It is highly recommended to use pipelines with River. Indeed, in an online learning setting, it is very practical to have a model defined as a single object. Take a look at the user guide for further information and practical examples.

    One special thing to take notice of is the way transformers are handled. It is usual to predict something for a sample and wait for the ground truth to arrive. In such a scenario, the features are seen before the ground truth arrives. Therefore, the unsupervised parts of the pipeline are updated when predict_one and predict_proba_one are called. Usually the unsupervised parts of the pipeline are all the steps that precede the final step, which is a supervised model. However, some transformers are supervised and are therefore also updated during calls to learn_one.

    "},{"location":"api/compose/Pipeline/#parameters","title":"Parameters","text":"
    • steps

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/Pipeline/#examples","title":"Examples","text":"

    The recommended way to declare a pipeline is to use the | operator. The latter allows you to chain estimators in a very terse manner:

    from river import linear_model\nfrom river import preprocessing\n\nscaler = preprocessing.StandardScaler()\nlin_reg = linear_model.LinearRegression()\nmodel = scaler | lin_reg\n

    This results in a pipeline that stores each step inside a dictionary.

    model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    You can access parts of a pipeline in the same manner as a dictionary:

    model['LinearRegression']\n
    LinearRegression (\n  optimizer=SGD (\n    lr=Constant (\n      learning_rate=0.01\n    )\n  )\n  loss=Squared ()\n  l2=0.\n  l1=0.\n  intercept_init=0.\n  intercept_lr=Constant (\n    learning_rate=0.01\n  )\n  clip_gradient=1e+12\n  initializer=Zeros ()\n)\n

    Note that you can also declare a pipeline by using the compose.Pipeline constructor method, which is slightly more verbose:

    from river import compose\n\nmodel = compose.Pipeline(scaler, lin_reg)\n

    By using a compose.TransformerUnion, you can define complex pipelines that apply different steps to different parts of the data. For instance, we can extract word counts from text data, and extract polynomial features from numeric data.

    from river import feature_extraction as fx\n\ntfidf = fx.TFIDF('text')\ncounts = fx.BagOfWords('text')\ntext_part = compose.Select('text') | (tfidf + counts)\n\nnum_part = compose.Select('a', 'b') | fx.PolynomialExtender()\n\nmodel = text_part + num_part\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n

    The following shows an example of using debug_one to visualize how the information flows and changes throughout the pipeline.

    from river import compose\nfrom river import naive_bayes\n\ndataset = [\n    ('A positive comment', True),\n    ('A negative comment', False),\n    ('A happy comment', True),\n    ('A lovely comment', True),\n    ('A harsh comment', False)\n]\n\ntfidf = fx.TFIDF() | compose.Prefixer('tfidf_')\ncounts = fx.BagOfWords() | compose.Prefixer('count_')\nmnb = naive_bayes.MultinomialNB()\nmodel = (tfidf + counts) | mnb\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nx = dataset[0][0]\nreport = model.debug_one(dataset[0][0])\nprint(report)\n
    0. Input\n--------\nA positive comment\n1. Transformer union\n--------------------\n    1.0 TFIDF | Prefixer\n    --------------------\n    tfidf_comment: 0.43017 (float)\n    tfidf_positive: 0.90275 (float)\n    1.1 BagOfWords | Prefixer\n    -------------------------\n    count_comment: 1 (int)\n    count_positive: 1 (int)\ncount_comment: 1 (int)\ncount_positive: 1 (int)\ntfidf_comment: 0.43017 (float)\ntfidf_positive: 0.90275 (float)\n2. MultinomialNB\n----------------\nFalse: 0.19221\nTrue: 0.80779\n

    "},{"location":"api/compose/Pipeline/#methods","title":"Methods","text":"debug_one

    Displays the state of a set of features as it goes through the pipeline.

    Parameters

    • x \u2014 'dict'
    • show_types \u2014 defaults to True
    • n_decimals \u2014 defaults to 5

    forecast

    Return a forecast.

    Only works if each estimator has a transform_one method and the final estimator has a forecast method. This is the case for time series models from the time_series module.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_many

    Fit to a mini-batch.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None
    • params

    learn_one

    Fit to a single instance.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None
    • params

    predict_many

    Call transform_many, and then predict_many on the final step.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_one

    Call transform_one on the first steps and predict_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    predict_proba_many

    Call transform_many, and then predict_proba_many on the final step.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Call transform_one on the first steps and predict_proba_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    score_one

    Call transform_one on the first steps and score_one on the last step.

    Parameters

    • x \u2014 'dict'
    • params

    transform_many

    Apply each transformer in the pipeline to some features.

    The final step in the pipeline will be applied if it is a transformer. If not, then it will be ignored and the output from the penultimate step will be returned. Note that the steps that precede the final step are assumed to all be transformers.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Apply each transformer in the pipeline to some features.

    The final step in the pipeline will be applied if it is a transformer. If not, then it will be ignored and the output from the penultimate step will be returned. Note that the steps that precede the final step are assumed to all be transformers.

    Parameters

    • x \u2014 'dict'
    • params

    "},{"location":"api/compose/Prefixer/","title":"Prefixer","text":"

    Prepends a prefix on features names.

    "},{"location":"api/compose/Prefixer/#parameters","title":"Parameters","text":"
    • prefix

      Type \u2192 str

    "},{"location":"api/compose/Prefixer/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12}\ncompose.Prefixer('prefix_').transform_one(x)\n
    {'prefix_a': 42, 'prefix_b': 12}\n

    "},{"location":"api/compose/Prefixer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Renamer/","title":"Renamer","text":"

    Renames features following substitution rules.

    "},{"location":"api/compose/Renamer/#parameters","title":"Parameters","text":"
    • mapping

      Type \u2192 dict[str, str]

      Dictionary describing substitution rules. Keys in mapping that are not a feature's name are silently ignored.

    "},{"location":"api/compose/Renamer/#examples","title":"Examples","text":"

    from river import compose\n\nmapping = {'a': 'v', 'c': 'o'}\nx = {'a': 42, 'b': 12}\ncompose.Renamer(mapping).transform_one(x)\n
    {'b': 12, 'v': 42}\n

    "},{"location":"api/compose/Renamer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Select/","title":"Select","text":"

    Selects features.

    This can be used in a pipeline when you want to select certain features. The transform_one method is pure, and therefore returns a new dictionary instead of filtering the specified keys from the input.

    "},{"location":"api/compose/Select/#parameters","title":"Parameters","text":"
    • keys

      Type \u2192 tuple[base.typing.FeatureName]

      Key(s) to keep.

    "},{"location":"api/compose/Select/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12, 'c': 13}\ncompose.Select('c').transform_one(x)\n
    {'c': 13}\n

    You can chain a selector with any estimator in order to apply said estimator to the desired features.

    from river import feature_extraction as fx\n\nx = {'sales': 10, 'shop': 'Ikea', 'country': 'Sweden'}\n\npipeline = (\n    compose.Select('sales') |\n    fx.PolynomialExtender()\n)\npipeline.transform_one(x)\n
    {'sales': 10, 'sales*sales': 100}\n

    This transformer also supports mini-batch processing:

    import random\nfrom river import compose\n\nrandom.seed(42)\nX = [{\"x_1\": random.uniform(8, 12), \"x_2\": random.uniform(8, 12)} for _ in range(6)]\nfor x in X:\n    print(x)\n
    {'x_1': 10.557707193831535, 'x_2': 8.100043020890668}\n{'x_1': 9.100117273476478, 'x_2': 8.892842952595291}\n{'x_1': 10.94588485665605, 'x_2': 10.706797949691644}\n{'x_1': 11.568718270819382, 'x_2': 8.347755330517664}\n{'x_1': 9.687687278741082, 'x_2': 8.119188877752281}\n{'x_1': 8.874551899214413, 'x_2': 10.021421152413449}\n

    import pandas as pd\nX = pd.DataFrame.from_dict(X)\n

    You can then call transform_many to transform a mini-batch of features:

    compose.Select('x_2').transform_many(X)\n
        x_2\n0   8.100043\n1   8.892843\n2  10.706798\n3   8.347755\n4   8.119189\n5  10.021421\n

    "},{"location":"api/compose/Select/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

    A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_many can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/SelectType/","title":"SelectType","text":"

    Selects features based on their type.

    This is practical when you want to apply different preprocessing steps to different kinds of features. For instance, a common use case is to apply a preprocessing.StandardScaler to numeric features and a preprocessing.OneHotEncoder to categorical features.

    "},{"location":"api/compose/SelectType/#parameters","title":"Parameters","text":"
    • types

      Type \u2192 tuple[type]

      Python types which you want to select. Under the hood, the built-in isinstance function will be used to check if a value is of a given type.

    "},{"location":"api/compose/SelectType/#examples","title":"Examples","text":"
    import numbers\nfrom river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\nnum = compose.SelectType(numbers.Number) | preprocessing.StandardScaler()\ncat = compose.SelectType(str) | preprocessing.OneHotEncoder()\nmodel = (num + cat) | linear_model.LogisticRegression()\n
    "},{"location":"api/compose/SelectType/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/Suffixer/","title":"Suffixer","text":"

    Appends a suffix on features names.

    "},{"location":"api/compose/Suffixer/#parameters","title":"Parameters","text":"
    • suffix

      Type \u2192 str

    "},{"location":"api/compose/Suffixer/#examples","title":"Examples","text":"

    from river import compose\n\nx = {'a': 42, 'b': 12}\ncompose.Suffixer('_suffix').transform_one(x)\n
    {'a_suffix': 42, 'b_suffix': 12}\n

    "},{"location":"api/compose/Suffixer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason the default behavior of this function is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/compose/TargetTransformRegressor/","title":"TargetTransformRegressor","text":"

    Modifies the target before training.

    The user is expected to check that func and inverse_func are coherent with each other.

    "},{"location":"api/compose/TargetTransformRegressor/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    • func

      Type \u2192 typing.Callable

      A function modifying the target before training.

    • inverse_func

      Type \u2192 typing.Callable

      A function to return to the target's original space.

    "},{"location":"api/compose/TargetTransformRegressor/#examples","title":"Examples","text":"

    import math\nfrom river import compose\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    compose.TargetTransformRegressor(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15),\n        func=math.log,\n        inverse_func=math.exp\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 10.999752\n

    "},{"location":"api/compose/TargetTransformRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/compose/TransformerProduct/","title":"TransformerProduct","text":"

    Computes interactions between the outputs of a set of transformers.

    This is for when you want to add interaction terms between groups of features. It may also be used as an alternative to feature_extraction.PolynomialExtender when the latter is overkill.

    "},{"location":"api/compose/TransformerProduct/#parameters","title":"Parameters","text":"
    • transformers

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/TransformerProduct/#examples","title":"Examples","text":"

    Let's say we have a certain set of features with two groups. In practice these may be different namespaces, such as one for items and the other for users.

    x = dict(\n    a=0, b=1,  # group 1\n    x=2, y=3   # group 2\n)\n

    We might want to add interaction terms between groups ('a', 'b') and ('x', 'y'), as so:

    from pprint import pprint\nfrom river.compose import Select, TransformerProduct\n\nproduct = TransformerProduct(\n    Select('a', 'b'),\n    Select('x', 'y')\n)\npprint(product.transform_one(x))\n
    {'a*x': 0, 'a*y': 0, 'b*x': 2, 'b*y': 3}\n

    This can also be done with the following shorthand:

    product = Select('a', 'b') * Select('x', 'y')\npprint(product.transform_one(x))\n
    {'a*x': 0, 'a*y': 0, 'b*x': 2, 'b*y': 3}\n

    If you want to include the original terms, you can do something like this:

    group_1 = Select('a', 'b')\ngroup_2 = Select('x', 'y')\nproduct = group_1 + group_2 + group_1 * group_2\npprint(product.transform_one(x))\n
    {'a': 0, 'a*x': 0, 'a*y': 0, 'b': 1, 'b*x': 2, 'b*y': 3, 'x': 2, 'y': 3}\n

    "},{"location":"api/compose/TransformerProduct/#methods","title":"Methods","text":"learn_many

    Update each transformer.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None

    learn_one

    Update each transformer.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    transform_many

    Passes the data through each transformer and packs the results together.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Passes the data through each transformer and packs the results together.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/compose/TransformerUnion/","title":"TransformerUnion","text":"

    Packs multiple transformers into a single one.

    Pipelines allow you to apply steps sequentially. Therefore, the output of a step becomes the input of the next one. In many cases, you may want to pass the output of a step to multiple steps. This simple transformer allows you to do so. In other words, it enables you to apply particular steps to different parts of an input. A typical example is when you want to scale numeric features and one-hot encode categorical features.

    This transformer is essentially a list of transformers. Whenever it is updated, it loops through each transformer and updates them. Meanwhile, calling transform_one collects the output of each transformer and merges them into a single dictionary.

    "},{"location":"api/compose/TransformerUnion/#parameters","title":"Parameters","text":"
    • transformers

      Ideally, a list of (name, estimator) tuples. A name is automatically inferred if none is provided.

    "},{"location":"api/compose/TransformerUnion/#examples","title":"Examples","text":"

    Take the following dataset:

    X = [\n    {'place': 'Taco Bell', 'revenue': 42},\n    {'place': 'Burger King', 'revenue': 16},\n    {'place': 'Burger King', 'revenue': 24},\n    {'place': 'Taco Bell', 'revenue': 58},\n    {'place': 'Burger King', 'revenue': 20},\n    {'place': 'Taco Bell', 'revenue': 50}\n]\n

    As an example, let's assume we want to compute two aggregates of a dataset. We therefore define two feature_extraction.Aggs and initialize a TransformerUnion with them:

    from river import compose\nfrom river import feature_extraction\nfrom river import stats\n\nmean = feature_extraction.Agg(\n    on='revenue', by='place',\n    how=stats.Mean()\n)\ncount = feature_extraction.Agg(\n    on='revenue', by='place',\n    how=stats.Count()\n)\nagg = compose.TransformerUnion(mean, count)\n

    We can now update each transformer and obtain their output with a single function call:

    from pprint import pprint\nfor x in X:\n    agg = agg.learn_one(x)\n    pprint(agg.transform_one(x))\n
    {'revenue_count_by_place': 1, 'revenue_mean_by_place': 42.0}\n{'revenue_count_by_place': 1, 'revenue_mean_by_place': 16.0}\n{'revenue_count_by_place': 2, 'revenue_mean_by_place': 20.0}\n{'revenue_count_by_place': 2, 'revenue_mean_by_place': 50.0}\n{'revenue_count_by_place': 3, 'revenue_mean_by_place': 20.0}\n{'revenue_count_by_place': 3, 'revenue_mean_by_place': 50.0}\n

    Note that you can use the + operator as a shorthand notation:

    agg = mean + count

    This allows you to build complex pipelines in a very terse manner. For instance, we can create a pipeline that scales each feature and fits a logistic regression as so:

    from river import linear_model as lm\nfrom river import preprocessing as pp\n\nmodel = (\n    (mean + count) |\n    pp.StandardScaler() |\n    lm.LogisticRegression()\n)\n

    Which is equivalent to the following code:

    model = compose.Pipeline(\n    compose.TransformerUnion(mean, count),\n    pp.StandardScaler(),\n    lm.LogisticRegression()\n)\n

    Note that you can access any part of a TransformerUnion by name:

    model['TransformerUnion']['Agg']\n
    Agg (\n    on=\"revenue\"\n    by=['place']\n    how=Mean ()\n)\n

    model['TransformerUnion']['Agg1']\n
    Agg (\n    on=\"revenue\"\n    by=['place']\n    how=Count ()\n)\n

    You can also manually provide a name for each step:

    agg = compose.TransformerUnion(\n    ('Mean revenue by place', mean),\n    ('# by place', count)\n)\n

    Mini-batch example:

    import pandas as pd\n\nX = pd.DataFrame([\n    {\"place\": 2, \"revenue\": 42},\n    {\"place\": 3, \"revenue\": 16},\n    {\"place\": 3, \"revenue\": 24},\n    {\"place\": 2, \"revenue\": 58},\n    {\"place\": 3, \"revenue\": 20},\n    {\"place\": 2, \"revenue\": 50},\n])\n

    Since we need a transformer with mini-batch support to demonstrate, we shall use a StandardScaler.

    from river import compose\nfrom river import preprocessing\n\nagg = (\n    compose.Select(\"place\") +\n    (compose.Select(\"revenue\") | preprocessing.StandardScaler())\n)\n\n_ = agg.learn_many(X)\nagg.transform_many(X)\n
       place   revenue\n0      2  0.441250\n1      3 -1.197680\n2      3 -0.693394\n3      2  1.449823\n4      3 -0.945537\n5      2  0.945537\n

    "},{"location":"api/compose/TransformerUnion/#methods","title":"Methods","text":"learn_many

    Update each transformer.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series | None' \u2014 defaults to None

    learn_one

    Update each transformer.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    transform_many

    Passes the data through each transformer and packs the results together.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Passes the data through each transformer and packs the results together.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/compose/learn-during-predict/","title":"learn_during_predict","text":"

    A context manager for fitting unsupervised steps during prediction.

    Usually, the unsupervised parts of a pipeline are updated during learn_one. However, in the case of online learning, it is possible to update them earlier, during the prediction step. This context manager allows you to do so.

    This usually brings a slight performance improvement. But it is not done by default because it is not intuitive and is more difficult to test. It also means that you have to call predict_one before learn_one in order for the whole pipeline to be updated.

    "},{"location":"api/compose/learn-during-predict/#examples","title":"Examples","text":"

    Let's first see what methods are called if we just call predict_one.

    import io\nimport logging\nfrom river import compose\nfrom river import datasets\nfrom river import linear_model\nfrom river import preprocessing\nfrom river import utils\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nclass_condition = lambda x: x.__class__.__name__ in ('StandardScaler', 'LinearRegression')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_one(x)\n\nprint(logs.getvalue())\n
    StandardScaler.transform_one\nLinearRegression.predict_one\n

    Now let's use the context manager and see what methods get called.

    logs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition), compose.learn_during_predict():\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_one(x)\n\nprint(logs.getvalue())\n
    StandardScaler.learn_one\nStandardScaler.transform_one\nLinearRegression.predict_one\n

    This time, we can see that the scaler gets updated before transforming the data.

    This also works when working with mini-batches.

    import pandas as pd\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_many(pd.DataFrame([x]))\nprint(logs.getvalue())\n
    StandardScaler.transform_many\nLinearRegression.predict_many\n

    logs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition), compose.learn_during_predict():\n    for x, y in datasets.TrumpApproval().take(1):\n        _ = model.predict_many(pd.DataFrame([x]))\nprint(logs.getvalue())\n
    StandardScaler.learn_many\nStandardScaler.transform_many\nLinearRegression.predict_many\n

    "},{"location":"api/conf/Interval/","title":"Interval","text":"

    An object to represent a (prediction) interval.

    Users are not expected to use this class as-is. Instead, they should use the with_interval parameter of the predict_one method of any regressor or classifier wrapped with a conformal prediction method.

    "},{"location":"api/conf/Interval/#parameters","title":"Parameters","text":"
    • lower

      Type \u2192 float

      The lower bound of the interval.

    • upper

      Type \u2192 float

      The upper bound of the interval.

    "},{"location":"api/conf/Interval/#attributes","title":"Attributes","text":"
    • center

      The center of the interval.

    • width

      The width of the interval.
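
    As a minimal sketch (reusing the RegressionJackknife wrapper documented below), the interval's attributes can be read directly:

    from river import conf\nfrom river import datasets\nfrom river import linear_model\nfrom river import preprocessing\n\nmodel = conf.RegressionJackknife(\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression()\n)\n\nfor x, y in datasets.TrumpApproval().take(10):\n    interval = model.predict_one(x, with_interval=True)\n    model = model.learn_one(x, y)\n\n# The bounds should satisfy lower <= center <= upper\nassert interval.lower <= interval.center <= interval.upper\n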

    "},{"location":"api/conf/RegressionJackknife/","title":"RegressionJackknife","text":"

    Jackknife method for regression.

    This is a conformal prediction method for regression. It is based on the jackknife method. The idea is to compute the quantiles of the residuals of the regressor. The prediction interval is then computed as the prediction of the regressor plus the quantiles of the residuals.

    This works naturally online, as the quantiles of the residuals are updated at each iteration. Each residual is produced before the regressor is updated, which ensures the predicted intervals are not optimistic.

    Note that the produced intervals are marginal and not conditional. This means that the intervals are not adjusted for the features x. This is a limitation of the jackknife method. However, the jackknife method is very simple and efficient. It is also very robust to outliers.

    "},{"location":"api/conf/RegressionJackknife/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regressor to be wrapped.

    • confidence_level

      Type \u2192 float

      Default \u2192 0.95

      The confidence level of the prediction intervals.

    • window_size

      Type \u2192 int | None

      Default \u2192 None

      The size of the window used to compute the quantiles of the residuals. If None, the quantiles are computed over the whole history. It is advised to set this if you expect the model's performance to change over time.

    "},{"location":"api/conf/RegressionJackknife/#examples","title":"Examples","text":"
    from river import conf\nfrom river import datasets\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\n\ndataset = datasets.TrumpApproval()\n\nmodel = conf.RegressionJackknife(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(intercept_lr=.1)\n    ),\n    confidence_level=0.9\n)\n\nvalidity = stats.Mean()\nefficiency = stats.Mean()\n\nfor x, y in dataset:\n    interval = model.predict_one(x, with_interval=True)\n    validity = validity.update(y in interval)\n    efficiency = efficiency.update(interval.width)\n    model = model.learn_one(x, y)\n

    The interval's validity is the proportion of times the true value is within the interval. We specified a confidence level of 90%, so we expect the validity to be around 90%.

    validity\n
    Mean: 0.939061\n

    The interval's efficiency is the average width of the intervals.

    efficiency\n
    Mean: 4.078361\n

    Lowering the confidence level will mechanically improve the efficiency.

    "},{"location":"api/conf/RegressionJackknife/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • with_interval \u2014 defaults to False
    • kwargs

    Returns

    The prediction.

    1. Barber, Rina Foygel, Emmanuel J. Candes, Aaditya Ramdas, and Ryan J. Tibshirani. \"Predictive inference with the jackknife+.\" The Annals of Statistics 49, no. 1 (2021): 486-507. \u21a9

    "},{"location":"api/covariance/EmpiricalCovariance/","title":"EmpiricalCovariance","text":"

    Empirical covariance matrix.

    "},{"location":"api/covariance/EmpiricalCovariance/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/covariance/EmpiricalCovariance/#attributes","title":"Attributes","text":"
    • matrix
    "},{"location":"api/covariance/EmpiricalCovariance/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import covariance\n\nnp.random.seed(42)\nX = pd.DataFrame(np.random.random((8, 3)), columns=[\"red\", \"green\", \"blue\"])\nX\n
            red     green      blue\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n5  0.183405  0.304242  0.524756\n6  0.431945  0.291229  0.611853\n7  0.139494  0.292145  0.366362\n

    cov = covariance.EmpiricalCovariance()\nfor x in X.to_dict(orient=\"records\"):\n    cov = cov.update(x)\ncov\n
            blue     green    red\n blue    0.076    0.020   -0.010\ngreen    0.020    0.113   -0.053\n  red   -0.010   -0.053    0.079\n

    There is also an update_many method to process mini-batches. The results are identical.

    cov = covariance.EmpiricalCovariance()\ncov = cov.update_many(X)\ncov\n
            blue     green    red\n blue    0.076    0.020   -0.010\ngreen    0.020    0.113   -0.053\n  red   -0.010   -0.053    0.079\n

    The covariances are stored in a dictionary, meaning any one of them can be accessed as such:

    cov[\"blue\", \"green\"]\n
    Cov: 0.020292\n

    Diagonal entries are variances:

    cov[\"blue\", \"blue\"]\n
    Var: 0.076119\n

    "},{"location":"api/covariance/EmpiricalCovariance/#methods","title":"Methods","text":"revert

    Downdate with a single sample.

    Parameters

    • x \u2014 'dict'

    update

    Update with a single sample.

    Parameters

    • x \u2014 'dict'

    update_many

    Update with a dataframe of samples.

    Parameters

    • X \u2014 'pd.DataFrame'

    "},{"location":"api/covariance/EmpiricalPrecision/","title":"EmpiricalPrecision","text":"

    Empirical precision matrix.

    The precision matrix is the inverse of the covariance matrix.

    This implementation leverages the Sherman-Morrison formula. The resulting inverse covariance matrix is not guaranteed to be identical to a batch computation. However, the difference shrinks with the number of observations.

    "},{"location":"api/covariance/EmpiricalPrecision/#attributes","title":"Attributes","text":"
    • matrix
    "},{"location":"api/covariance/EmpiricalPrecision/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import covariance\n\nnp.random.seed(42)\nX = pd.DataFrame(np.random.random((1000, 3)))\nX.head()\n
              0         1         2\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n

    prec = covariance.EmpiricalPrecision()\nfor x in X.to_dict(orient=\"records\"):\n    prec = prec.update(x)\n\nprec\n
        0        1        2\n0   12.026   -0.122   -0.214\n1   -0.122   11.276   -0.026\n2   -0.214   -0.026   11.632\n

    pd.DataFrame(np.linalg.inv(np.cov(X.T, ddof=1)))\n
               0          1          2\n0  12.159791  -0.124966  -0.218671\n1  -0.124966  11.393394  -0.026662\n2  -0.218671  -0.026662  11.756907\n

    "},{"location":"api/covariance/EmpiricalPrecision/#methods","title":"Methods","text":"update

    Update with a single sample.

    Parameters

    • x

    update_many

    Update with a dataframe of samples.

    Parameters

    • X \u2014 'pd.DataFrame'
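
    As with EmpiricalCovariance, a mini-batch can be processed in one call. A minimal sketch:

    import numpy as np
    import pandas as pd
    from river import covariance

    np.random.seed(42)
    X = pd.DataFrame(np.random.random((1000, 3)))

    prec = covariance.EmpiricalPrecision()
    prec = prec.update_many(X)  # one pass over the whole mini-batch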

    1. Online Estimation of the Inverse Covariance Matrix - Markus Thill \u21a9

    2. Fast rank-one updates to matrix inverse? - Tim Vieira \u21a9

    3. Woodbury matrix identity \u21a9

    "},{"location":"api/datasets/AirlinePassengers/","title":"AirlinePassengers","text":"

    Monthly number of international airline passengers.

    The stream contains 144 items and a single feature, the month. The goal is to predict the number of passengers each month by capturing the trend and the seasonality of the data.

    "},{"location":"api/datasets/AirlinePassengers/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/AirlinePassengers/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
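
    For illustration, a minimal sketch of peeking at the stream with take (the feature keys follow the dataset's own schema):

    from river import datasets

    dataset = datasets.AirlinePassengers()

    for x, y in dataset.take(3):
        print(x, y)  # x holds the month, y is the passenger count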

    1. International airline passengers: monthly totals in thousands. Jan 49 \u2013 Dec 60 \u21a9

    "},{"location":"api/datasets/Bananas/","title":"Bananas","text":"

    Bananas dataset.

    An artificial dataset where instances belong to several banana-shaped clusters. There are two attributes, corresponding to the x and y axes respectively.

    "},{"location":"api/datasets/Bananas/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/Bananas/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. OpenML page \u21a9

    "},{"location":"api/datasets/Bikes/","title":"Bikes","text":"

    Bike sharing station information from the city of Toulouse.

    The goal is to predict the number of bikes in 5 different bike stations from the city of Toulouse.

    "},{"location":"api/datasets/Bikes/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Bikes/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
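
    Being a remote dataset, the data must be fetched before first use. A minimal sketch of doing so explicitly:

    from river import datasets

    dataset = datasets.Bikes()
    if not dataset.is_downloaded:
        dataset.download()  # fetch and unpack the data locally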

    1. A short introduction and conclusion to the OpenBikes 2016 Challenge \u21a9

    "},{"location":"api/datasets/ChickWeights/","title":"ChickWeights","text":"

    Chick weights along time.

    The stream contains 578 items and 3 features. The goal is to predict the weight of each chick along time, according to the diet the chick is on. The data is ordered by time and then by chick.

    "},{"location":"api/datasets/ChickWeights/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/ChickWeights/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Chick weight dataset overview \u21a9

    "},{"location":"api/datasets/CreditCard/","title":"CreditCard","text":"

    Credit card frauds.

    The dataset contains transactions made by credit cards in September 2013 by European cardholders. It presents transactions that occurred over two days, with 492 frauds out of 284,807 transactions. The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions.

    It contains only numerical input variables, which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, the original features and further background information cannot be provided. Features V1, V2, ... V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. Feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and takes value 1 in case of fraud and 0 otherwise.

    "},{"location":"api/datasets/CreditCard/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/CreditCard/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
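
    As a quick sanity check of the stated imbalance, one might count labels over the first samples (iterating downloads the data on first use). A minimal sketch:

    import collections

    from river import datasets

    dataset = datasets.CreditCard()
    counts = collections.Counter(y for _, y in dataset.take(10_000))
    print(counts)  # the positive class should be a small minority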

    1. Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\u00a0\u21a9

    2. Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon\u00a0\u21a9

    3. Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\u00a0\u21a9

    4. Dal Pozzolo, Andrea Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi)\u00a0\u21a9

    5. Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Ael; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier\u00a0\u21a9

    6. Carcillo, Fabrizio; Le Borgne, Yann-Ael; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\u00a0\u21a9

    7. Bertrand Lebichot, Yann-Ael Le Borgne, Liyun He, Frederic Oble, Gianluca Bontempi Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection, INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\u00a0\u21a9

    8. Fabrizio Carcillo, Yann-Ael Le Borgne, Olivier Caelen, Frederic Oble, Gianluca Bontempi Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection Information Sciences, 2019\u00a0\u21a9

    "},{"location":"api/datasets/Elec2/","title":"Elec2","text":"

    Electricity prices in New South Wales.

    This is a binary classification task, where the goal is to predict if the price of electricity will go up or down.

    This data was collected from the Australian New South Wales Electricity Market. In this market, prices are not fixed and are affected by demand and supply of the market. They are set every five minutes. Electricity transfers to/from the neighboring state of Victoria were done to alleviate fluctuations.

    "},{"location":"api/datasets/Elec2/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Elec2/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. SPLICE-2 Comparative Evaluation: Electricity Pricing \u21a9

    2. DataHub description \u21a9

    "},{"location":"api/datasets/HTTP/","title":"HTTP","text":"

    HTTP dataset of the KDD 1999 cup.

    The goal is to predict whether or not an HTTP connection is anomalous. The dataset only contains 2,211 (0.4%) positive labels.

    "},{"location":"api/datasets/HTTP/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/HTTP/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. HTTP (KDDCUP99) dataset \u21a9

    "},{"location":"api/datasets/Higgs/","title":"Higgs","text":"

    Higgs dataset.

    The data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22) are kinematic properties measured by the particle detectors in the accelerator. The last seven features are functions of the first 21 features; these are high-level features derived by physicists to help discriminate between the two classes.

    "},{"location":"api/datasets/Higgs/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Higgs/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/ImageSegments/","title":"ImageSegments","text":"

    Image segments classification.

    This dataset contains features that describe image segments into 7 classes: brickface, sky, foliage, cement, window, path, and grass.

    "},{"location":"api/datasets/ImageSegments/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/ImageSegments/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/Insects/","title":"Insects","text":"

    Insects dataset.

    This dataset has different variants, which are:

    • abrupt_balanced

    • abrupt_imbalanced

    • gradual_balanced

    • gradual_imbalanced

    • incremental-abrupt_balanced

    • incremental-abrupt_imbalanced

    • incremental-reoccurring_balanced

    • incremental-reoccurring_imbalanced

    • incremental_balanced

    • incremental_imbalanced

    • out-of-control

    The number of samples and the difficulty change from one variant to another. The number of classes is always the same (6), except for the last variant (24).

    "},{"location":"api/datasets/Insects/#parameters","title":"Parameters","text":"
    • variant

      Default \u2192 abrupt_balanced

      Indicates which variant of the dataset to load.

    "},{"location":"api/datasets/Insects/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Insects/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
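
    A minimal sketch of loading a specific variant:

    from river import datasets

    dataset = datasets.Insects(variant="gradual_balanced")
    if not dataset.is_downloaded:
        dataset.download()

    for x, y in dataset.take(1):
        print(y)  # one of the 6 classes of this variant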

    1. USP DS repository \u21a9

    2. Souza, V., Reis, D.M.D., Maletzke, A.G. and Batista, G.E., 2020. Challenges in Benchmarking Stream Learning Algorithms with Real-world Data. arXiv preprint arXiv:2005.00113. \u21a9

    "},{"location":"api/datasets/Keystroke/","title":"Keystroke","text":"

    CMU keystroke dataset.

    Users are tasked with typing in a password. The goal is to determine which user is typing it.

    The only difference with the original dataset is that the \"sessionIndex\" and \"rep\" attributes have been dropped.

    "},{"location":"api/datasets/Keystroke/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Keystroke/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Keystroke Dynamics - Benchmark Data Set \u21a9

    "},{"location":"api/datasets/MaliciousURL/","title":"MaliciousURL","text":"

    Malicious URLs dataset.

    This dataset contains features about URLs that are classified as malicious or not.

    "},{"location":"api/datasets/MaliciousURL/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/MaliciousURL/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Detecting Malicious URLs \u21a9

    2. Identifying Suspicious URLs: An Application of Large-Scale Online Learning \u21a9

    "},{"location":"api/datasets/MovieLens100K/","title":"MovieLens100K","text":"

    MovieLens 100K dataset.

    MovieLens datasets were collected by the GroupLens Research Project at the University of Minnesota. This dataset consists of 100,000 ratings (1-5) from 943 users on 1682 movies. Each user has rated at least 20 movies. User and movie information are provided. The data was collected through the MovieLens web site (movielens.umn.edu) during the seven-month period from September 19th, 1997 through April 22nd, 1998.

    "},{"location":"api/datasets/MovieLens100K/#parameters","title":"Parameters","text":"
    • unpack_user_and_item

      Default \u2192 False

      Whether or not the user and item should be extracted from the context and included as extra keyword arguments.

    "},{"location":"api/datasets/MovieLens100K/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/MovieLens100K/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. The MovieLens Datasets: History and Context \u21a9

    "},{"location":"api/datasets/Music/","title":"Music","text":"

    Multi-label music mood prediction.

    The goal is to predict which kinds of moods a song pertains to.

    "},{"location":"api/datasets/Music/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Music/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Read, J., Reutemann, P., Pfahringer, B. and Holmes, G., 2016. MEKA: a multi-label/multi-target extension to WEKA. The Journal of Machine Learning Research, 17(1), pp.667-671. \u21a9

    "},{"location":"api/datasets/Phishing/","title":"Phishing","text":"

    Phishing websites.

    This dataset contains features from web pages that are classified as phishing or not.

    "},{"location":"api/datasets/Phishing/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/Phishing/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/Restaurants/","title":"Restaurants","text":"

    Data from the Kaggle Recruit Restaurants challenge.

    The goal is to predict the number of visitors in each of 829 Japanese restaurants over a period of roughly 16 weeks. The data is ordered by date and then by restaurant ID.

    "},{"location":"api/datasets/Restaurants/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Restaurants/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Recruit Restaurant Visitor Forecasting \u21a9

    "},{"location":"api/datasets/SMSSpam/","title":"SMSSpam","text":"

    SMS Spam Collection dataset.

    The data contains 5,574 items and 1 feature (i.e. the SMS body). Spam messages represent 13.4% of the dataset. The goal is to predict whether an SMS is spam or not.

    "},{"location":"api/datasets/SMSSpam/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/SMSSpam/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
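
    As an illustration, a hedged sketch of a streaming spam classifier, assuming the SMS text is exposed under a 'body' key; BagOfWords and BernoulliNB are standard river components, but this particular pairing is only an example:

    from river import datasets, feature_extraction, naive_bayes

    dataset = datasets.SMSSpam()
    model = feature_extraction.BagOfWords(on="body") | naive_bayes.BernoulliNB()

    for x, y in dataset.take(100):
        y_pred = model.predict_one(x)  # predict before learning (progressive validation)
        model.learn_one(x, y)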

    1. Almeida, T.A., Hidalgo, J.M.G. and Yamakami, A., 2011, September. Contributions to the study of SMS spam filtering: new collection and results. In Proceedings of the 11th ACM symposium on Document engineering (pp. 259-262). \u21a9

    "},{"location":"api/datasets/SMTP/","title":"SMTP","text":"

    SMTP dataset from the KDD 1999 cup.

    The goal is to predict whether or not an SMTP connection is anomalous. The dataset only contains 2,211 (0.4%) positive labels.

    "},{"location":"api/datasets/SMTP/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/SMTP/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. SMTP (KDDCUP99) dataset \u21a9

    "},{"location":"api/datasets/SolarFlare/","title":"SolarFlare","text":"

    Solar flare multi-output regression.

    "},{"location":"api/datasets/SolarFlare/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/SolarFlare/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. UCI page \u21a9

    "},{"location":"api/datasets/TREC07/","title":"TREC07","text":"

    TREC's 2007 Spam Track dataset.

    The data contains 75,419 chronologically ordered items, i.e. 3 months of emails delivered to a particular server in 2007. Spam messages represent 66.6% of the dataset. The goal is to predict whether an email is spam or not.

    The available raw features are: sender, recipients, date, subject, body.

    "},{"location":"api/datasets/TREC07/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/TREC07/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. TREC 2007 Spam Track Overview \u21a9

    2. Code ran to parse the dataset \u21a9

    "},{"location":"api/datasets/Taxis/","title":"Taxis","text":"

    Taxi ride durations in New York City.

    The goal is to predict the duration of taxi rides in New York City.

    "},{"location":"api/datasets/Taxis/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/Taxis/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. New York City Taxi Trip Duration competition on Kaggle \u21a9

    "},{"location":"api/datasets/TrumpApproval/","title":"TrumpApproval","text":"

    Donald Trump approval ratings.

    This dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald Trump's approval ratings. It contains 5 features, which are approval ratings collected by 5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of this task is to see if we can reproduce FiveThirtyEight's model.

    "},{"location":"api/datasets/TrumpApproval/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/TrumpApproval/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Trump Approval Ratings \u21a9

    "},{"location":"api/datasets/WaterFlow/","title":"WaterFlow","text":"

    Water flow through a pipeline branch.

    The series includes hourly values for about 2 months, March 2022 to May 2022. The values are expressed in liters per second. There are four anomalous segments in the series:

    • 3 \"low value moments\": this is due to water losses or human intervention for maintenance * A small peak in the water inflow after the first 2 segments: this is due to a pumping operation into the main pipeline, when more water pressure is needed

    This dataset is well suited for time series forecasting models, as well as anomaly detection methods. Ideally, the goal is to build a time series forecasting model that is robust to the anomalous segments.

    This data has been kindly donated by the Tecnojest s.r.l. company (www.invidea.it) from Italy.

    "},{"location":"api/datasets/WaterFlow/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/WaterFlow/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/Dataset/","title":"Dataset","text":"

    Base class for all datasets.

    All datasets inherit from this class, be they stored in a file or generated on the fly.

    "},{"location":"api/datasets/base/Dataset/#parameters","title":"Parameters","text":"
    • task

      Type of task the dataset is meant for. Should be one of the following:

      • \"Regression\"

      • \"Binary classification\"

      • \"Multi-class classification\"

      • \"Multi-output binary classification\"

      • \"Multi-output regression\"

    • n_features

      Number of features in the dataset.

    • n_samples

      Default \u2192 None

      Number of samples in the dataset.

    • n_classes

      Default \u2192 None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default \u2192 None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default \u2192 False

      Whether the dataset is sparse or not.

    "},{"location":"api/datasets/base/Dataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/base/Dataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/FileDataset/","title":"FileDataset","text":"

    Base class for datasets that are stored in a local file.

    Small datasets that are part of the river package inherit from this class.

    "},{"location":"api/datasets/base/FileDataset/#parameters","title":"Parameters","text":"
    • filename

      The file's name.

    • directory

      Default \u2192 None

      The directory where the file is contained. Defaults to the location of the datasets module.

    • desc

      Extra dataset parameters to pass as keyword arguments.

    "},{"location":"api/datasets/base/FileDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/datasets/base/FileDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'
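
    A hypothetical sketch of a custom dataset inheriting from this class; the file name, target column, and task constant here are assumptions for illustration:

    from river import datasets, stream

    class MyDataset(datasets.base.FileDataset):
        def __init__(self):
            super().__init__(
                filename="my_data.csv",  # assumed to sit in the given directory
                task=datasets.base.REG,
                n_features=3,
            )

        def __iter__(self):
            # self.path resolves the file relative to the directory parameter
            yield from stream.iter_csv(self.path, target="y")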

    "},{"location":"api/datasets/base/RemoteDataset/","title":"RemoteDataset","text":"

    Base class for datasets that are stored in a remote file.

    Medium and large datasets that are not part of the river package inherit from this class.

    The filename doesn't have to be provided if unpack is False; in that case, the filename is inferred from the URL.

    "},{"location":"api/datasets/base/RemoteDataset/#parameters","title":"Parameters","text":"
    • url

      The URL the dataset is located at.

    • size

      The expected download size.

    • unpack

      Default \u2192 True

      Whether to unpack the download or not.

    • filename

      Default \u2192 None

      An optional name to give to the file if the file is unpacked.

    • desc

      Extra dataset parameters to pass as keyword arguments.

    "},{"location":"api/datasets/base/RemoteDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • is_downloaded

      Indicate whether or not the data has been correctly downloaded.

    • path

    "},{"location":"api/datasets/base/RemoteDataset/#methods","title":"Methods","text":"download take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/base/SyntheticDataset/","title":"SyntheticDataset","text":"

    A synthetic dataset.

    "},{"location":"api/datasets/base/SyntheticDataset/#parameters","title":"Parameters","text":"
    • task

      Type of task the dataset is meant for. Should be one of:

      • \"Regression\"

      • \"Binary classification\"

      • \"Multi-class classification\"

      • \"Multi-output binary classification\"

      • \"Multi-output regression\"

    • n_features

      Number of features in the dataset.

    • n_samples

      Default \u2192 None

      Number of samples in the dataset.

    • n_classes

      Default \u2192 None

      Number of classes in the dataset, only applies to classification datasets.

    • n_outputs

      Default \u2192 None

      Number of outputs the target is made of, only applies to multi-output datasets.

    • sparse

      Default \u2192 False

      Whether the dataset is sparse or not.

    "},{"location":"api/datasets/base/SyntheticDataset/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/base/SyntheticDataset/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Agrawal/","title":"Agrawal","text":"

    Agrawal stream generator.

    The generator was introduced by Agrawal et al. 1, and was a common source of data for early work on scaling up decision tree learners. The generator produces a stream containing nine features, six numeric and three categorical. There are 10 functions defined for generating binary class labels from the features. Presumably these determine whether the loan should be approved. Classification functions are listed in the original paper 1.

    Feature | Description | Values
    ------- | ----------- | ------
    salary | salary | uniformly distributed from 20k to 150k
    commission | commission | 0 if salary < 75k else uniformly distributed from 10k to 75k
    age | age | uniformly distributed from 20 to 80
    elevel | education level | uniformly chosen from 0 to 4
    car | car maker | uniformly chosen from 1 to 20
    zipcode | zip code of the town | uniformly chosen from 0 to 8
    hvalue | house value | uniformly distributed from 50k x zipcode to 100k x zipcode
    hyears | years house owned | uniformly distributed from 1 to 30
    loan | total loan amount | uniformly distributed from 0 to 500k

    "},{"location":"api/datasets/synth/Agrawal/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      The classification function to use for the generation. Valid values are from 0 to 9.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      If True, the class distribution will converge to a uniform distribution.

    • perturbation

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. Each new sample will be perturbed by the magnitude of perturbation. Valid values are in the range [0.0, 1.0].

    "},{"location":"api/datasets/synth/Agrawal/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Agrawal/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Agrawal(\n    classification_function=0,\n    seed=42\n)\n\ndataset\n
    Synthetic data generator\n<BLANKLINE>\n    Name  Agrawal\n    Task  Binary classification\n Samples  \u221e\nFeatures  9\n Outputs  1\n Classes  2\n  Sparse  False\n<BLANKLINE>\nConfiguration\n-------------\nclassification_function  0\n                   seed  42\n        balance_classes  False\n           perturbation  0.0\n

    for x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [103125.4837, 0, 21, 2, 8, 3, 319768.9642, 4, 338349.7437] 1\n[135983.3438, 0, 25, 4, 14, 0, 423837.7755, 7, 116330.4466] 1\n[98262.4347, 0, 55, 1, 18, 6, 144088.1244, 19, 139095.3541] 0\n[133009.0417, 0, 68, 1, 14, 5, 233361.4025, 7, 478606.5361] 1\n[63757.2908, 16955.9382, 26, 2, 12, 4, 522851.3093, 24, 229712.4398] 1\n

    "},{"location":"api/datasets/synth/Agrawal/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function randomly.
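
    A minimal sketch of triggering the switch by hand; samples generated afterwards follow the new classification function:

    from river.datasets import synth

    dataset = synth.Agrawal(classification_function=0, seed=42)
    for x, y in dataset.take(5):
        pass  # samples drawn under the initial concept

    dataset.generate_drift()  # randomly switch the classification function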

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Agrawal/#notes","title":"Notes","text":"

    The sample generation works as follows: the nine features are generated with the random generator, initialized with the seed passed by the user. Then, the classification function decides, as a function of all the attributes, whether to classify the instance as class 0 or class 1. The next step is to verify whether the classes should be balanced and, if so, to balance them. Finally, noise is added if perturbation > 0.0.

    1. Rakesh Agrawal, Tomasz Imielinksi, and Arun Swami. \"Database Mining: A Performance Perspective\", IEEE Transactions on Knowledge and Data Engineering, 5(6), December 1993.\u00a0\u21a9\u21a9

    "},{"location":"api/datasets/synth/AnomalySine/","title":"AnomalySine","text":"

    Simulate a stream with anomalies in sine waves.

    The amount of data generated by this generator is finite.

    The data generated corresponds to sine and cosine functions. Anomalies are induced by replacing the cosine values with values from a different sine function. The contextual flag can be used to introduce contextual anomalies, which are values in the normal global range but abnormal compared to the seasonal pattern. Contextual anomalies are introduced by replacing cosine entries with sine values.

    The target indicates whether or not the instances are anomalous.

    "},{"location":"api/datasets/synth/AnomalySine/#parameters","title":"Parameters","text":"
    • n_samples

      Type \u2192 int

      Default \u2192 10000

      The number of samples to generate. This generator creates a batch of data affected by contextual anomalies and noise.

    • n_anomalies

      Type \u2192 int

      Default \u2192 2500

      Number of anomalies. Can't be larger than n_samples.

    • contextual

      Type \u2192 bool

      Default \u2192 False

      If True, will add contextual anomalies.

    • n_contextual

      Type \u2192 int

      Default \u2192 2500

      Number of contextual anomalies. Can't be larger than n_samples.

    • shift

      Type \u2192 int

      Default \u2192 4

      Shift in number of samples applied when retrieving contextual anomalies.

    • noise

      Type \u2192 float

      Default \u2192 0.5

      Amount of noise.

    • replace

      Type \u2192 bool

      Default \u2192 True

      If True, anomalies are randomly sampled with replacement.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/datasets/synth/AnomalySine/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/AnomalySine/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.AnomalySine(\n    seed=12345,\n    n_samples=100,\n    n_anomalies=25,\n    contextual=True,\n    n_contextual=10\n)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'sine': -0.7119, 'cosine': 0.8777} False\n{'sine': 0.8792, 'cosine': -0.0290} False\n{'sine': 0.0440, 'cosine': 3.0852} True\n{'sine': 0.5520, 'cosine': 3.4515} True\n{'sine': 0.8037, 'cosine': 0.4027} False\n

    "},{"location":"api/datasets/synth/AnomalySine/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/ConceptDriftStream/","title":"ConceptDriftStream","text":"

    Generates a stream with concept drift.

    A stream generator that adds concept drift or change by joining two streams. This is done by building a weighted combination of two pure distributions that characterizes the target concepts before and after the change.

    The sigmoid function is an elegant and practical solution to define the probability that each new instance of the stream belongs to the new concept after the drift. The sigmoid function introduces a gradual, smooth transition whose duration is controlled with two parameters:

    • \\(p\\), the position of the change.

    • \\(w\\), the width of the transition.

    The sigmoid function at sample \\(t\\) is

    \\[f(t) = 1/(1+e^{-4(t-p)/w})\\]"},{"location":"api/datasets/synth/ConceptDriftStream/#parameters","title":"Parameters","text":"
    • stream

      Type \u2192 datasets.base.SyntheticDataset | None

      Default \u2192 None

      Original stream

    • drift_stream

      Type \u2192 datasets.base.SyntheticDataset | None

      Default \u2192 None

      Drift stream

    • position

      Type \u2192 int

      Default \u2192 5000

      Central position of the concept drift change.

    • width

      Type \u2192 int

      Default \u2192 1000

      Width of concept drift change.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • alpha

      Type \u2192 float | None

      Default \u2192 None

      Angle of change used to estimate the width of concept drift change. If set, it will override the width parameter. Valid values are in the range (0.0, 90.0].

    "},{"location":"api/datasets/synth/ConceptDriftStream/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/ConceptDriftStream/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.ConceptDriftStream(\n    stream=synth.SEA(seed=42, variant=0),\n    drift_stream=synth.SEA(seed=42, variant=1),\n    seed=1, position=5, width=2\n)\n\nfor x, y in dataset.take(10):\n    print(x, y)\n
    {0: 6.3942, 1: 0.2501, 2: 2.7502} False\n{0: 2.2321, 1: 7.3647, 2: 6.7669} True\n{0: 8.9217, 1: 0.8693, 2: 4.2192} True\n{0: 0.2979, 1: 2.1863, 2: 5.0535} False\n{0: 6.3942, 1: 0.2501, 2: 2.7502} False\n{0: 2.2321, 1: 7.3647, 2: 6.7669} True\n{0: 8.9217, 1: 0.8693, 2: 4.2192} True\n{0: 0.2979, 1: 2.1863, 2: 5.0535} False\n{0: 0.2653, 1: 1.9883, 2: 6.4988} False\n{0: 5.4494, 1: 2.2044, 2: 5.8926} False\n

    "},{"location":"api/datasets/synth/ConceptDriftStream/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/ConceptDriftStream/#notes","title":"Notes","text":"

    An optional way to estimate the width of the transition \(w\) is based on the angle \(\alpha\): \(w = 1/\tan(\alpha)\). Since the width corresponds to the number of samples in the transition, it is rounded down to the nearest integer. Notice that larger values of \(\alpha\) result in smaller widths. For \(\alpha > 45.0\), the width is smaller than 1, so values are rounded up to 1 to avoid division-by-zero errors.
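
    For instance, a minimal sketch of using alpha instead of width; with alpha=45.0 the transition width is roughly \(1/\tan(45°) = 1\) sample:

    from river.datasets import synth

    dataset = synth.ConceptDriftStream(
        stream=synth.SEA(seed=42, variant=0),
        drift_stream=synth.SEA(seed=42, variant=1),
        position=5,
        alpha=45.0,  # overrides the width parameter
        seed=1,
    )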

    "},{"location":"api/datasets/synth/Friedman/","title":"Friedman","text":"

    Friedman synthetic dataset.

    Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1]. The target is defined by the following function:

    \[y = 10 \sin(\pi x_0 x_1) + 20 (x_2 - 0.5)^2 + 10 x_3 + 5 x_4 + \epsilon\]

    In the last expression, \(\epsilon \sim \mathcal{N}(0, 1)\) is the noise term. Therefore, only the first 5 features are relevant.

    "},{"location":"api/datasets/synth/Friedman/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Friedman/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Friedman/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Friedman(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90\n

    "},{"location":"api/datasets/synth/Friedman/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Friedman, J.H., 1991. Multivariate adaptive regression splines. The annals of statistics, pp.1-67. \u21a9

    "},{"location":"api/datasets/synth/FriedmanDrift/","title":"FriedmanDrift","text":"

    Friedman synthetic dataset with concept drifts.

    Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1]. Only the first 5 features are relevant. The target is defined by different functions depending on the type of the drift.

    The three available modes of operation of the data generator are described in 1.

    "},{"location":"api/datasets/synth/FriedmanDrift/#parameters","title":"Parameters","text":"
    • drift_type

      Type \u2192 str

      Default \u2192 lea

      The variant of concept drift.

      • 'lea': Local Expanding Abrupt drift. The concept drift appears in two distinct regions of the instance space, while the remaining regions are left unaltered. There are three points of abrupt change in the training dataset. At every consecutive change the regions of drift are expanded.

      • 'gra': Global Recurring Abrupt drift. The concept drift appears over the whole instance space. There are two points of concept drift; at the second point the old concept reoccurs.

      • 'gsg': Global and Slow Gradual drift. The concept drift affects the whole instance space, but the change is gradual rather than abrupt. After each of the two change points covered by this variant, and during a window of length transition_window, examples from both the old and the new concepts are generated with equal probability. After the transition period, only examples from the new concept are generated.

    • position

      Type \u2192 tuple[int, ...]

      Default \u2192 (50000, 100000, 150000)

      The amount of monitored instances after which each concept drift occurs. A tuple with at least two elements must be passed, where each number is greater than the preceding one. If drift_type='lea', then the tuple must have three elements.

    • transition_window

      Type \u2192 int

      Default \u2192 10000

      The length of the transition window between two concepts. Only applicable when drift_type='gsg'. If set to zero, the drifts will be abrupt. Anytime transition_window > 0, it defines a window in which instances of the new concept are gradually introduced among the examples from the old concept. During this transition phase, both old and new concepts appear with equal probability.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/FriedmanDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/FriedmanDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.FriedmanDrift(\n    drift_type='lea',\n    position=(1, 2, 3),\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] -2.65\n

    dataset = synth.FriedmanDrift(\n    drift_type='gra',\n    position=(2, 3),\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.96\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90\n

    dataset = synth.FriedmanDrift(\n    drift_type='gsg',\n    position=(1, 4),\n    transition_window=2,\n    seed=42\n)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66\n[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33\n[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.92\n[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 17.32\n[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 6.05\n

    "},{"location":"api/datasets/synth/FriedmanDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Ikonomovska, E., Gama, J. and D\u017eeroski, S., 2011. Learning model trees from evolving data streams. Data mining and knowledge discovery, 23(1), pp.128-168.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Hyperplane/","title":"Hyperplane","text":"

    Hyperplane stream generator.

    Generates the problem of predicting the class of a rotating hyperplane. It was used as a testbed for CVFDT and VFDT in 1.

    A hyperplane in d-dimensional space is the set of points \\(x\\) that satisfy

    \\[\\sum^{d}_{i=1} w_i x_i = w_0 = \\sum^{d}_{i=1} w_i\\]

    where \\(x_i\\) is the i-th coordinate of \\(x\\).

    • Examples for which \\(\\sum^{d}_{i=1} w_i x_i > w_0\\), are labeled positive.

    • Examples for which \\(\\sum^{d}_{i=1} w_i x_i \\leq w_0\\), are labeled negative.

    Hyperplanes are useful for simulating time-changing concepts because we can change the orientation and position of the hyperplane in a smooth manner by changing the relative size of the weights. We introduce change to this dataset by adding drift to each weighted feature \\(w_i = w_i + d \\sigma\\), where \\(\\sigma\\) is the probability that the direction of change is reversed and \\(d\\) is the change applied to each example.

    "},{"location":"api/datasets/synth/Hyperplane/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of attributes to generate. Higher than 2.

    • n_drift_features

      Type \u2192 int

      Default \u2192 2

      The number of attributes with drift. Higher than 2.

    • mag_change

      Type \u2192 float

      Default \u2192 0.0

      Magnitude of the change for every example. From 0.0 to 1.0.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.05

      Percentage of noise to add to the data. From 0.0 to 1.0.

    • sigma

      Type \u2192 float

      Default \u2192 0.1

      Probability that the direction of change is reversed. From 0.0 to 1.0.

    "},{"location":"api/datasets/synth/Hyperplane/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Hyperplane/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Hyperplane(seed=42, n_features=2)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 0.2750, 1: 0.2232} 0\n{0: 0.0869, 1: 0.4219} 1\n{0: 0.0265, 1: 0.1988} 0\n{0: 0.5892, 1: 0.8094} 0\n{0: 0.3402, 1: 0.1554} 0\n

    "},{"location":"api/datasets/synth/Hyperplane/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Hyperplane/#notes","title":"Notes","text":"

    The sample generation works as follows: The features are generated with the random number generator, initialized with the seed passed by the user. Then the classification function decides, as a function of the sum of the weighted features and the sum of the weights, whether the instance belongs to class 0 or class 1. The last step is to add noise and generate drift.

    1. G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data streams. In KDD'01, pages 97-106, San Francisco, CA, 2001. ACM Press.\u00a0\u21a9

    "},{"location":"api/datasets/synth/LED/","title":"LED","text":"

    LED stream generator.

    This data source originates from the CART book 1. An implementation in C was donated to the UCI 2 machine learning repository by David Aha. The goal is to predict the digit displayed on a seven-segment LED display, where each attribute has a 10% chance of being inverted. It has an optimal Bayes classification rate of 74%. The particular configuration of the generator used for experiments (LED) produces 24 binary attributes, 17 of which are irrelevant.

    "},{"location":"api/datasets/synth/LED/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. For each new sample, a random number is drawn; if it is less than or equal to noise_percentage, the LED value is switched.

    • irrelevant_features

      Type \u2192 bool

      Default \u2192 False

      Adds 17 non-relevant attributes to the stream.

    "},{"location":"api/datasets/synth/LED/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/LED/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.LED(seed = 112, noise_percentage = 0.28, irrelevant_features= False)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1, 1: 0, 2: 1, 3: 0, 4: 0, 5: 1, 6: 0} 7\n{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 0} 8\n{0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 1, 6: 0} 9\n{0: 0, 1: 0, 2: 1, 3: 0, 4: 0, 5: 1, 6: 0} 1\n{0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0} 1\n

    "},{"location":"api/datasets/synth/LED/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/LED/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. If irrelevant_features is set, the total number of attributes will be 24; otherwise there will be 7 attributes.

    1. Leo Breiman, Jerome Friedman, R. Olshen, and Charles J. Stone. Classification and Regression Trees. Wadsworth and Brooks, Monterey, CA,1984.\u00a0\u21a9

    2. A. Asuncion and D. J. Newman. UCI Machine Learning Repository [http://www.ics.uci.edu/~mlearn/mlrepository.html]. University of California, Irvine, School of Information and Computer Sciences,2007.\u00a0\u21a9

    "},{"location":"api/datasets/synth/LEDDrift/","title":"LEDDrift","text":"

    LED stream generator with concept drift.

    This class is an extension of the LED generator whose purpose is to add concept drift to the stream.

    "},{"location":"api/datasets/synth/LEDDrift/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • noise_percentage

      Type \u2192 float

      Default \u2192 0.0

      The probability that noise will happen in the generation. For each new sample, a random number is drawn; if it is less than or equal to noise_percentage, the LED value is switched.

    • irrelevant_features

      Type \u2192 bool

      Default \u2192 False

      Adds 17 non-relevant attributes to the stream.

    • n_drift_features

      Type \u2192 int

      Default \u2192 0

      The number of attributes that have drift.

    "},{"location":"api/datasets/synth/LEDDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/LEDDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.LEDDrift(seed = 112, noise_percentage = 0.28,\n                         irrelevant_features= True, n_drift_features=4)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1] 7\n[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0] 6\n[0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1] 1\n[1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1] 6\n[1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0] 7\n

    "},{"location":"api/datasets/synth/LEDDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/LEDDrift/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. If irrelevant_features is set, the total number of attributes will be 24; otherwise there will be 7 attributes.

    "},{"location":"api/datasets/synth/Logical/","title":"Logical","text":"

    Logical functions stream generator.

    Make a toy dataset with three labels that represent the logical functions: OR, XOR, AND (functions of the 2D input).

    Data is generated in 'tiles' which contain the complete set of logical operations results. The tiles are repeated n_tiles times. Optionally, the generated data can be shuffled.

    "},{"location":"api/datasets/synth/Logical/#parameters","title":"Parameters","text":"
    • n_tiles

      Type \u2192 int

      Default \u2192 1

      Number of tiles to generate.

    • shuffle

      Type \u2192 bool

      Default \u2192 True

      If set, generated data will be shuffled.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/datasets/synth/Logical/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Logical/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Logical(n_tiles=2, shuffle=True, seed=42)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'A': 1, 'B': 1} {'OR': 1, 'XOR': 0, 'AND': 1}\n{'A': 0, 'B': 0} {'OR': 0, 'XOR': 0, 'AND': 0}\n{'A': 1, 'B': 0} {'OR': 1, 'XOR': 1, 'AND': 0}\n{'A': 1, 'B': 1} {'OR': 1, 'XOR': 0, 'AND': 1}\n{'A': 1, 'B': 0} {'OR': 1, 'XOR': 1, 'AND': 0}\n

    "},{"location":"api/datasets/synth/Logical/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Mixed/","title":"Mixed","text":"

    Mixed data stream generator.

    This generator is an implementation of a data stream with abrupt concept drift and boolean noise-free examples as described in 1.

    It has four relevant attributes, two boolean attributes \\(v, w\\) and two numeric attributes \\(x, y\\) uniformly distributed from 0 to 1. The examples are labeled depending on the classification function chosen from below.

    • function 0: if \(v\) and \(w\) are true, or \(v\) and \(z\) are true, or \(w\) and \(z\) are true, then 0, else 1, where \(z\) stands for \(y < 0.5 + 0.3 \sin(3 \pi x)\)

    • function 1: The opposite of function 0.

    Concept drift can be introduced by changing the classification function. This can be done manually or using ConceptDriftStream.

    "},{"location":"api/datasets/synth/Mixed/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Which of the two classification functions to use for the generation. Valid options are 0 or 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    "},{"location":"api/datasets/synth/Mixed/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Mixed/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.Mixed(seed = 42, classification_function=1, balance_classes = True)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: True, 1: False, 2: 0.2750, 3: 0.2232} 1\n{0: False, 1: False, 2: 0.2186, 3: 0.5053} 0\n{0: False, 1: True, 2: 0.8094, 3: 0.0064} 1\n{0: False, 1: False, 2: 0.1010, 3: 0.2779} 0\n{0: True, 1: False, 2: 0.37018, 3: 0.2095} 1\n

    "},{"location":"api/datasets/synth/Mixed/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function.
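
    Since there are only two classification functions, inducing drift amounts to flipping between them. A minimal sketch:

    from river.datasets import synth

    dataset = synth.Mixed(classification_function=0, seed=42)
    dataset.generate_drift()  # labels now follow function 1, the opposite concept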

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Mixed/#notes","title":"Notes","text":"

    The sample generation works as follows: the two numeric attributes are generated with the random generator, initialized with the seed passed by the user (optional). The boolean attributes are set to 0 or 1 by comparing a draw from the random number generator with 0.5. The classification function then decides whether to classify the instance as class 0 or class 1. The next step is to verify whether the classes should be balanced and, if so, to balance them.

    The generated sample will have 4 relevant features and 1 label (it is a binary-classification task).

    1. Gama, Joao, et al. \"Learning with drift detection.\" Advances in artificial intelligence-SBIA 2004. Springer Berlin Heidelberg, 2004. 286-295\"\u00a0\u21a9

    "},{"location":"api/datasets/synth/Mv/","title":"Mv","text":"

    Mv artificial dataset.

    Artificial dataset composed of both nominal and numeric features, whose features present co-dependencies. Originally described in 1.

    The features are generated using the following expressions:

    • \\(x_1\\): uniformly distributed over [-5, 5].

    • \\(x_2\\): uniformly distributed over [-15, -10].

    • \\(x_3\\):

      • if \\(x_1 > 0\\), \\(x_3 \\leftarrow\\) 'green'

      • else \\(x_3 \\leftarrow\\) 'red' with probability \\(0.4\\) and \\(x_3 \\leftarrow\\) 'brown' with probability \\(0.6\\).

    • \\(x_4\\):

      • if \\(x_3 =\\) 'green', \\(x_4 \\leftarrow x_1 + 2 x_2\\)

      • else \\(x_4 = \\frac{x_1}{2}\\) with probability \\(0.3\\) and \\(x_4 = \\frac{x_2}{2}\\) with probability \\(0.7\\).

    • \\(x_5\\): uniformly distributed over [-1, 1].

    • \\(x_6 \\leftarrow x_4 \\times \\epsilon\\), where \\(\\epsilon\\) is uniformly distributed over [0, 5].

    • \\(x_7\\): 'yes' with probability \\(0.3\\), and 'no' with probability \\(0.7\\).

    • \\(x_8\\): 'normal' if \\(x_5 < 0.5\\) else 'large'.

    • \\(x_9\\): uniformly distributed over [100, 500].

    • \\(x_{10}\\): uniformly distributed integer over the interval [1000, 1200].

    The target value is generated using the following rules (a sketch in code follows the list):

    • if \\(x_2 > 2\\), \\(y \\leftarrow 35 - 0.5 x_4\\)

    • else if \\(-2 \\le x_4 \\le 2\\), \\(y \\leftarrow 10 - 2 x_1\\)

    • else if \\(x_7 =\\) 'yes', \\(y \\leftarrow 3 - \\frac{x_1}{x_4}\\)

    • else if \\(x_8 =\\) 'normal', \\(y \\leftarrow x_6 + x_1\\)

    • else \\(y \\leftarrow \\frac{x_1}{2}\\).
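
    To make the rule set concrete, here is a hypothetical helper that reproduces it (mv_target is for illustration only and not part of river):

    def mv_target(x1, x2, x4, x6, x7, x8):
        """Hypothetical helper reproducing the Mv target rules above."""
        if x2 > 2:
            return 35 - 0.5 * x4
        if -2 <= x4 <= 2:
            return 10 - 2 * x1
        if x7 == 'yes':
            return 3 - x1 / x4
        if x8 == 'normal':
            return x6 + x1
        return x1 / 2

    print(mv_target(1.39, -14.87, -28.35, -31.64, 'no', 'normal'))  # -30.25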

    "},{"location":"api/datasets/synth/Mv/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Mv/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Mv/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Mv(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [1.39, -14.87, 'green', -28.35, -0.44, -31.64, 'no', 'normal', 370.67, 1178.43] -30.25\n[-4.13, -12.89, 'red', -2.06, 0.01, -0.27, 'yes', 'normal', 359.95, 1108.98] 1.00\n[-2.79, -12.05, 'brown', -1.39, 0.61, -4.87, 'no', 'large', 162.19, 1191.44] 15.59\n[-1.63, -14.53, 'red', -7.26, 0.20, -29.33, 'no', 'normal', 314.49, 1194.62] -30.96\n[-1.21, -12.23, 'brown', -6.11, 0.72, -17.66, 'no', 'large', 118.32, 1045.57] -0.60\n

    "},{"location":"api/datasets/synth/Mv/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Mv in Lu\u00eds Torgo regression datasets \u21a9

    "},{"location":"api/datasets/synth/Planes2D/","title":"Planes2D","text":"

    2D Planes synthetic dataset.

    This dataset is described in 1 and was adapted from 2. The features are generated using the following probabilities:

    \\[P(x_1 = -1) = P(x_1 = 1) = \\frac{1}{2}\\] \\[P(x_m = -1) = P(x_m = 0) = P(x_m = 1) = \\frac{1}{3}, m=2,\\ldots, 10\\]

    The target value is defined by the following rule:

    \\[\\text{if}~x_1 = 1, y \\leftarrow 3 + 3x_2 + 2x_3 + x_4 + \\epsilon\\] \\[\\text{if}~x_1 = -1, y \\leftarrow -3 + 3x_5 + 2x_6 + x_7 + \\epsilon\\]

    In the expressions, \\(\\epsilon \\sim \\mathcal{N}(0, 1)\\) is Gaussian noise.
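
    A rough sketch of this sampling scheme in plain Python (illustrative, not river's implementation; x[0] plays the role of \\(x_1\\)):

    import random

    rng = random.Random(42)
    # x_1 is uniform over {-1, 1}; x_2 ... x_10 are uniform over {-1, 0, 1}.
    x = [rng.choice([-1, 1])] + [rng.choice([-1, 0, 1]) for _ in range(9)]
    eps = rng.gauss(0, 1)  # Gaussian noise
    if x[0] == 1:
        y = 3 + 3 * x[1] + 2 * x[2] + x[3] + eps
    else:
        y = -3 + 3 * x[4] + 2 * x[5] + x[6] + eps
    print(x, round(y, 2))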

    "},{"location":"api/datasets/synth/Planes2D/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/Planes2D/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Planes2D/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Planes2D(seed=42)\n\nfor x, y in dataset.take(5):\n    print(list(x.values()), y)\n
    [-1, -1, 1, 0, -1, -1, -1, 1, -1, 1] -9.07\n[1, -1, -1, -1, -1, -1, 1, 1, -1, 1] -4.25\n[-1, 1, 1, 1, 1, 0, -1, 0, 1, 0] -0.95\n[-1, 1, 0, 0, 0, -1, -1, 0, -1, -1] -6.10\n[1, -1, 0, 0, 1, 0, -1, 1, 0, 1] 1.60\n

    "},{"location":"api/datasets/synth/Planes2D/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. 2DPlanes in Lu\u00eds Torgo regression datasets \u21a9

    2. Breiman, L., Friedman, J., Stone, C.J. and Olshen, R.A., 1984. Classification and regression trees. CRC press.\u00a0\u21a9

    "},{"location":"api/datasets/synth/RandomRBF/","title":"RandomRBF","text":"

    Random Radial Basis Function generator.

    Produces a radial basis function stream. A set of centroids is generated, each with a random central position, a standard deviation, a class label, and a weight. A new sample is created by choosing one of the centroids at random, taking their weights into account, and offsetting the attributes in a random direction from the centroid's center. The offset length is drawn from a Gaussian distribution.

    This process creates a normally distributed hypersphere of samples around each centroid.
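
    A simplified sketch of the generation step described above (illustrative only, not river's implementation):

    import math
    import random

    rng = random.Random(42)
    n_features = 4
    # Each centroid gets a random centre, std, label and weight.
    centroids = [
        {
            "centre": [rng.random() for _ in range(n_features)],
            "std": rng.random(),
            "label": rng.randint(0, 1),
            "weight": rng.random(),
        }
        for _ in range(5)
    ]
    # Pick a centroid according to its weight ...
    chosen = rng.choices(centroids, weights=[c["weight"] for c in centroids])[0]
    # ... then offset its centre in a random direction by a Gaussian length.
    direction = [rng.gauss(0, 1) for _ in range(n_features)]
    norm = math.sqrt(sum(d * d for d in direction)) or 1.0
    length = rng.gauss(0, 1) * chosen["std"]
    x = [ci + length * d / norm for ci, d in zip(chosen["centre"], direction)]
    print([round(v, 4) for v in x], chosen["label"])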

    "},{"location":"api/datasets/synth/RandomRBF/#parameters","title":"Parameters","text":"
    • seed_model

      Type \u2192 int | None

      Default \u2192 None

      Model's random seed to generate centroids.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Sample's random seed.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of class labels to generate.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of numerical features to generate.

    • n_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids to generate.

    "},{"location":"api/datasets/synth/RandomRBF/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomRBF/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.RandomRBF(seed_model=42, seed_sample=42,\n                          n_classes=4, n_features=4, n_centroids=20)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1.0989, 1: 0.3840, 2: 0.7759, 3: 0.6592} 2\n{0: 0.2366, 1: 1.3233, 2: 0.5691, 3: 0.2083} 0\n{0: 1.3540, 1: -0.3306, 2: 0.1683, 3: 0.8865} 0\n{0: 0.2585, 1: -0.2217, 2: 0.4739, 3: 0.6522} 0\n{0: 0.1295, 1: 0.5953, 2: 0.1774, 3: 0.6673} 1\n

    "},{"location":"api/datasets/synth/RandomRBF/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/RandomRBFDrift/","title":"RandomRBFDrift","text":"

    Random Radial Basis Function generator with concept drift.

    This class is an extension from the RandomRBF generator. Concept drift can be introduced in instances of this class.

    The drift is created by adding a "speed" to certain centroids. As the samples are generated, each of the moving centroids' centers shifts by an amount determined by its speed.
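
    A sketch of the drift mechanism (illustrative; the wrap-around at the [0, 1] bounds is a simplification of how the actual generator keeps centroids in range):

    import random

    rng = random.Random(42)
    change_speed = 0.05
    # A drifting centroid moves along a fixed random direction by
    # `change_speed` per generated sample.
    centre = [rng.random(), rng.random()]
    direction = [rng.uniform(-1, 1), rng.uniform(-1, 1)]
    for _ in range(3):
        centre = [(c + change_speed * d) % 1.0 for c, d in zip(centre, direction)]
        print([round(c, 3) for c in centre])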

    "},{"location":"api/datasets/synth/RandomRBFDrift/#parameters","title":"Parameters","text":"
    • seed_model

      Type \u2192 int | None

      Default \u2192 None

      Model's random seed to generate centroids.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Sample's random seed.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of class labels to generate.

    • n_features

      Type \u2192 int

      Default \u2192 10

      The number of numerical features to generate.

    • n_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids to generate.

    • change_speed

      Type \u2192 float

      Default \u2192 0.0

      The concept drift speed.

    • n_drift_centroids

      Type \u2192 int

      Default \u2192 50

      The number of centroids that will drift.

    "},{"location":"api/datasets/synth/RandomRBFDrift/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomRBFDrift/#examples","title":"Examples","text":"

    from river.datasets import synth\ndataset = synth.RandomRBFDrift(seed_model=42, seed_sample=42,\n                               n_classes=4, n_features=4, n_centroids=20,\n                               change_speed=0.87, n_drift_centroids=10)\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 1.0989, 1: 0.3840, 2: 0.7759, 3: 0.6592} 2\n{0: 1.1496, 1: 1.9014, 2: 1.5393, 3: 0.3210} 0\n{0: 0.7146, 1: -0.2414, 2: 0.8933, 3: 1.6633} 0\n{0: 0.3797, 1: -0.1027, 2: 0.8717, 3: 1.1635} 0\n{0: 0.1295, 1: 0.5953, 2: 0.1774, 3: 0.6673} 1\n

    "},{"location":"api/datasets/synth/RandomRBFDrift/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/RandomTree/","title":"RandomTree","text":"

    Random Tree generator.

    This generator is based on 1. The generator creates a random tree by splitting features at random and setting labels at its leaves.

    The tree structure is composed of node objects, which can be either inner nodes or leaf nodes. The choice comes as a function of the parameters passed to its initializer.

    Since the concepts are generated and classified according to a tree structure, in theory, it should favor decision tree learners.

    "},{"location":"api/datasets/synth/RandomTree/#parameters","title":"Parameters","text":"
    • seed_tree

      Type \u2192 int | None

      Default \u2192 None

      Seed for random generation of tree.

    • seed_sample

      Type \u2192 int | None

      Default \u2192 None

      Seed for random generation of instances.

    • n_classes

      Type \u2192 int

      Default \u2192 2

      The number of classes to generate.

    • n_num_features

      Type \u2192 int

      Default \u2192 5

      The number of numerical features to generate.

    • n_cat_features

      Type \u2192 int

      Default \u2192 5

      The number of categorical features to generate.

    • n_categories_per_feature

      Type \u2192 int

      Default \u2192 5

      The number of values to generate per categorical feature.

    • max_tree_depth

      Type \u2192 int

      Default \u2192 5

      The maximum depth of the tree concept.

    • first_leaf_level

      Type \u2192 int

      Default \u2192 3

      The first level of the tree above max_tree_depth that can have leaves.

    • fraction_leaves_per_level

      Type \u2192 float

      Default \u2192 0.15

      The fraction of leaves per level from first_leaf_level onwards.

    "},{"location":"api/datasets/synth/RandomTree/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/RandomTree/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.RandomTree(seed_tree=42, seed_sample=42, n_classes=2,\n                           n_num_features=2, n_cat_features=2,\n                           n_categories_per_feature=2, max_tree_depth=6,\n                           first_leaf_level=3, fraction_leaves_per_level=0.15)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'x_num_0': 0.6394, 'x_num_1': 0.0250, 'x_cat_0': 1, 'x_cat_1': 0} 0\n{'x_num_0': 0.2232, 'x_num_1': 0.7364, 'x_cat_0': 0, 'x_cat_1': 1} 1\n{'x_num_0': 0.0317, 'x_num_1': 0.0936, 'x_cat_0': 0, 'x_cat_1': 0} 0\n{'x_num_0': 0.5612, 'x_num_1': 0.7160, 'x_cat_0': 1, 'x_cat_1': 0} 0\n{'x_num_0': 0.4492, 'x_num_1': 0.2781, 'x_cat_0': 0, 'x_cat_1': 0} 0\n

    "},{"location":"api/datasets/synth/RandomTree/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. Domingos, Pedro, and Geoff Hulten. \"Mining high-speed data streams.\" In Proceedings of the sixth ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 71-80. 2000.\u00a0\u21a9

    "},{"location":"api/datasets/synth/SEA/","title":"SEA","text":"

    SEA synthetic dataset.

    Implementation of the data stream with abrupt drift described in 1. Each observation is composed of 3 features. Only the first two features are relevant. The target is binary, and is positive if the sum of the features exceeds a certain threshold. There are 4 thresholds to choose from (listed below; a sketch of the rule follows the list). Concept drift can be introduced by switching the threshold anytime during the stream.

    • Variant 0: True if \\(att1 + att2 > 8\\)

    • Variant 1: True if \\(att1 + att2 > 9\\)

    • Variant 2: True if \\(att1 + att2 > 7\\)

    • Variant 3: True if \\(att1 + att2 > 9.5\\)
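
    The variants above boil down to a single threshold rule; a minimal sketch (sea_label is a hypothetical helper, not part of river):

    import random

    THRESHOLDS = {0: 8, 1: 9, 2: 7, 3: 9.5}

    def sea_label(x, variant=0, noise=0.0, rng=random.Random(42)):
        y = x[0] + x[1] > THRESHOLDS[variant]  # the third feature is ignored
        if rng.random() < noise:               # flip the target with prob. `noise`
            y = not y
        return y

    print(sea_label({0: 6.39, 1: 0.25, 2: 2.75}, variant=0))  # False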

    "},{"location":"api/datasets/synth/SEA/#parameters","title":"Parameters","text":"
    • variant

      Default \u2192 0

      Determines the classification function to use. Possible choices are 0, 1, 2, 3.

    • noise

      Default \u2192 0.0

      Determines the fraction of observations for which the target will be flipped.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed number used for reproducibility.

    "},{"location":"api/datasets/synth/SEA/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/SEA/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.SEA(variant=0, seed=42)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 6.39426, 1: 0.25010, 2: 2.75029} False\n{0: 2.23210, 1: 7.36471, 2: 6.76699} True\n{0: 8.92179, 1: 0.86938, 2: 4.21921} True\n{0: 0.29797, 1: 2.18637, 2: 5.05355} False\n{0: 0.26535, 1: 1.98837, 2: 6.49884} False\n

    "},{"location":"api/datasets/synth/SEA/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. A Streaming Ensemble Algorithm (SEA) for Large-Scale Classification \u21a9

    "},{"location":"api/datasets/synth/STAGGER/","title":"STAGGER","text":"

    STAGGER concepts stream generator.

    This generator is an implementation of the data stream with abrupt concept drift, as described in 1.

    The STAGGER concepts are boolean functions f with three features describing objects: size (small, medium and large), shape (circle, square and triangle) and colour (red, blue and green).

    The options for f are (a sketch in code follows the list):

    1. True if the size is small and the color is red.

    2. True if the color is green or the shape is a circle.

    3. True if the size is medium or large.

    Concept drift can be introduced by changing the classification function. This can be done manually or using datasets.synth.ConceptDriftStream.

    One important feature is the possibility to balance classes, which means the class distribution will tend to a uniform one.
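
    For reference, the three concepts can be sketched as plain boolean functions (hypothetical helpers; river encodes size, colour and shape as integers):

    def f0(size, colour, shape):
        return size == 'small' and colour == 'red'

    def f1(size, colour, shape):
        return colour == 'green' or shape == 'circle'

    def f2(size, colour, shape):
        return size in ('medium', 'large')

    print(f2('medium', 'blue', 'square'))  # True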

    "},{"location":"api/datasets/synth/STAGGER/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Which classification function to use. Valid options are 0 to 2.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    "},{"location":"api/datasets/synth/STAGGER/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/STAGGER/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.STAGGER(classification_function = 2, seed = 112,\n                     balance_classes = False)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {'size': 1, 'color': 2, 'shape': 2} 1\n{'size': 2, 'color': 1, 'shape': 2} 1\n{'size': 1, 'color': 1, 'shape': 2} 1\n{'size': 0, 'color': 1, 'shape': 0} 0\n{'size': 2, 'color': 1, 'shape': 0} 1\n

    "},{"location":"api/datasets/synth/STAGGER/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function at random.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/STAGGER/#notes","title":"Notes","text":"

    The sample generation works as follows: the 3 attributes are generated with the random number generator. The classification function defines whether to classify the instance as class 0 or class 1. Finally, the data is balanced if this option is set by the user.

    1. Schlimmer, J. C., & Granger, R. H. (1986). Incremental learning from noisy data. Machine learning, 1(3), 317-354.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Sine/","title":"Sine","text":"

    Sine generator.

    This generator is an implementation of the data stream with abrupt concept drift, as described in Gama, Joao, et al. 1.

    It generates up to 4 numerical features that vary from 0 to 1, where only 2 are relevant to the classification task and the other 2 are optionally added as noise. A classification function is chosen among four options:

    1. SINE1. Abrupt concept drift, noise-free examples. It has two relevant attributes. Each attribute has values uniformly distributed in [0, 1]. In the first context, all points below the curve \\(y = \\sin(x)\\) are classified as positive.

    2. Reversed SINE1. The reversed classification of SINE1.

    3. SINE2. The same two relevant attributes. The classification function is \\(y < 0.5 + 0.3 sin(3 \\pi x)\\).

    4. Reversed SINE2. The reversed classification of SINE2.

    Concept drift can be introduced by changing the classification function. This can be done manually or using ConceptDriftStream.

    Two important features are the possibility to balance classes, which means the class distribution will tend to a uniform one, and the possibility to add noise, which adds two non-relevant attributes.
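
    For reference, the four functions can be sketched as a hypothetical helper (sine_label is not part of river):

    import math

    # Functions 1 and 3 are the reversed versions of 0 and 2.
    def sine_label(x, y, classification_function=0):
        if classification_function in (0, 1):
            positive = y < math.sin(x)                            # SINE1
        else:
            positive = y < 0.5 + 0.3 * math.sin(3 * math.pi * x)  # SINE2
        if classification_function in (1, 3):                     # reversed variants
            positive = not positive
        return int(positive)

    print(sine_label(0.25, 0.1, classification_function=2))  # 1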

    "},{"location":"api/datasets/synth/Sine/#parameters","title":"Parameters","text":"
    • classification_function

      Type \u2192 int

      Default \u2192 0

      Which classification function to use. Valid options are 0 to 3.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • balance_classes

      Type \u2192 bool

      Default \u2192 False

      Whether to balance classes or not. If balanced, the class distribution will converge to a uniform distribution.

    • has_noise

      Type \u2192 bool

      Default \u2192 False

      Adds 2 non-relevant features to the stream.

    "},{"location":"api/datasets/synth/Sine/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Sine/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Sine(classification_function = 2, seed = 112,\n                     balance_classes = False, has_noise = True)\n\nfor x, y in dataset.take(5):\n    print(x, y)\n
    {0: 0.4812, 1: 0.6660, 2: 0.6198, 3: 0.6994} 1\n{0: 0.9022, 1: 0.7518, 2: 0.1625, 3: 0.2209} 0\n{0: 0.4547, 1: 0.3901, 2: 0.9629, 3: 0.7287} 0\n{0: 0.4683, 1: 0.3515, 2: 0.2273, 3: 0.6027} 0\n{0: 0.9238, 1: 0.1673, 2: 0.4522, 3: 0.3447} 0\n

    "},{"location":"api/datasets/synth/Sine/#methods","title":"Methods","text":"generate_drift

    Generate drift by switching the classification function at random.

    take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Sine/#notes","title":"Notes","text":"

    The sample generation works as follows: the two attributes are generated with the random number generator. The classification function defines whether to classify the instance as class 0 or class 1. Finally, the data is balanced and noise is added if these options are set by the user.

    The generated sample will have 2 relevant features, and an additional two noise features if has_noise is set.

    1. Gama, Joao, et al. "Learning with drift detection." Advances in artificial intelligence-SBIA 2004. Springer Berlin Heidelberg, 2004. 286-295.\u00a0\u21a9

    "},{"location":"api/datasets/synth/Waveform/","title":"Waveform","text":"

    Waveform stream generator.

    Generates samples with 21 numeric features and 3 classes, based on a random differentiation of some base waveforms. Supports noise addition; in this case, the samples will have 40 features.

    "},{"location":"api/datasets/synth/Waveform/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • has_noise

      Type \u2192 bool

      Default \u2192 False

      Adds 19 unrelated features to the stream.

    "},{"location":"api/datasets/synth/Waveform/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    "},{"location":"api/datasets/synth/Waveform/#examples","title":"Examples","text":"

    from river.datasets import synth\n\ndataset = synth.Waveform(seed=42, has_noise=True)\n\nfor x, y in dataset:\n    break\n\nx\n
    {0: -0.0397, 1: -0.7484, 2: 0.2974, 3: 0.3574, 4: -0.0735, 5: -0.3647, 6: 1.5631,     7: 2.5291, 8: 4.1599, 9: 4.9587, 10: 4.52587, 11: 4.0097, 12: 3.6705, 13: 1.7033,     14: 1.4898, 15: 1.9743, 16: 0.0898, 17: 2.319, 18: 0.2552, 19: -0.4775, 20: -0.71339,     21: 0.3770, 22: 0.3671, 23: 1.6579, 24: 0.7828, 25: 0.5855, 26: -0.5807, 27: 0.7112,     28: -0.0271, 29: 0.2968, 30: -0.4997, 31: 0.1302, 32: 0.3578, 33: -0.1900, 34: -0.3771,     35: 1.3560, 36: 0.7124, 37: -0.6245, 38: 0.1346, 39: 0.3550}\n

    y\n
    2\n

    "},{"location":"api/datasets/synth/Waveform/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/datasets/synth/Waveform/#notes","title":"Notes","text":"

    An instance is generated based on the parameters passed. The generator randomly chooses one of the hard-coded waveforms, as well as random multipliers. For each feature, the actual value generated is a combination of the hard-coded functions, the multipliers, and a random value.

    If noise is added, then features 21 to 40 are filled with random values drawn from a standard normal distribution.
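
    A sketch of this scheme, with assumed triangular base waves (the exact wave shapes and peak positions are an assumption; river hard-codes its own):

    import random

    def base_wave(peak, i):
        return max(6 - abs(i - peak), 0)  # triangular wave peaking at `peak`

    rng = random.Random(42)
    PAIRS = {0: (7, 13), 1: (7, 19), 2: (13, 19)}  # class -> pair of wave peaks
    y = rng.randint(0, 2)
    u = rng.random()                                # random multiplier
    # Each of the 21 features mixes the two base waves plus Gaussian noise;
    # the noisy variant would append 19 extra pure-noise features.
    x = [u * base_wave(PAIRS[y][0], i)
         + (1 - u) * base_wave(PAIRS[y][1], i)
         + rng.gauss(0, 1)
         for i in range(21)]
    print(y, [round(v, 2) for v in x[:5]])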

    "},{"location":"api/drift/ADWIN/","title":"ADWIN","text":"

    Adaptive Windowing method for concept drift detection.

    ADWIN (ADaptive WINdowing) is a popular drift detection method with mathematical guarantees. ADWIN efficiently keeps a variable-length window of recent items, such that the window is consistent with there having been no change in the data distribution. This window is further divided into two sub-windows \\((W_0, W_1)\\) used to determine if a change has happened. ADWIN compares the average of \\(W_0\\) and \\(W_1\\) to confirm that they correspond to the same distribution. Concept drift is detected if the distribution equality no longer holds. Upon detecting a drift, \\(W_0\\) is replaced by \\(W_1\\) and a new \\(W_1\\) is initialized. ADWIN uses a significance value \\(\\delta \\in (0, 1)\\) to determine if the two sub-windows correspond to the same distribution.
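
    To make the window-cut idea concrete, here is a deliberately naive sketch of the test. The bound below follows the Hoeffding-style \\(\\epsilon_{cut}\\) from the original paper; river's actual implementation compresses the window into exponential buckets rather than scanning every split:

    import math

    # Naive ADWIN-style cut test (illustrative only): scan every split of the
    # window into W0 | W1 and flag a change when the sub-window means differ
    # by more than a Hoeffding-style bound.
    def has_change(window, delta=0.002):
        n = len(window)
        for split in range(1, n):
            w0, w1 = window[:split], window[split:]
            m = 1 / (1 / len(w0) + 1 / len(w1))  # harmonic mean of sub-window sizes
            eps_cut = math.sqrt((1 / (2 * m)) * math.log(4 * n / delta))
            if abs(sum(w0) / len(w0) - sum(w1) / len(w1)) > eps_cut:
                return True
        return False

    print(has_change([0] * 50 + [5] * 50))  # True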

    "},{"location":"api/drift/ADWIN/#parameters","title":"Parameters","text":"
    • delta

      Default \u2192 0.002

      Significance value.

    • clock

      Default \u2192 32

      How often ADWIN should check for change. 1 means every new data point, default is 32. Higher values speed up processing, but may also lead to increased delay in change detection.

    • max_buckets

      Default \u2192 5

      The maximum number of buckets of each size that ADWIN should keep before merging buckets (default is 5).

    • min_window_length

      Default \u2192 5

      The minimum length of each subwindow (default is 5). Lower values may decrease delay in change detection but may also lead to more false positives.

    • grace_period

      Default \u2192 10

      ADWIN does not perform any change detection until at least this many data points have arrived (default is 10).

    "},{"location":"api/drift/ADWIN/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • estimation

      Estimate of mean value in the window.

    • n_detections

    • total

    • variance

    • width

      Window size

    "},{"location":"api/drift/ADWIN/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nadwin = drift.ADWIN()\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = adwin.update(val)\n    if adwin.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1023, input value: 4\n

    "},{"location":"api/drift/ADWIN/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Apart from adding the value to the window by inserting it in the correct bucket, this also updates the relevant statistics: the total sum of all values, the window width, and the total variance.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self

    1. Albert Bifet and Ricard Gavalda. \"Learning from time-changing data with adaptive windowing.\" In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448. Society for Industrial and Applied Mathematics, 2007.\u00a0\u21a9

    "},{"location":"api/drift/DriftRetrainingClassifier/","title":"DriftRetrainingClassifier","text":"

    Drift retraining classifier.

    This classifier is a wrapper for any classifier. It monitors the incoming data for concept drifts and for warnings about the model's accuracy. If a warning is detected, a background model starts to train. If a drift is detected, the model is replaced by the background model, and the background model is reset.

    "},{"location":"api/drift/DriftRetrainingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier and background classifier class.

    • drift_detector

      Type \u2192 base.DriftAndWarningDetector | base.BinaryDriftAndWarningDetector | None

      Default \u2192 None

      Algorithm to track warnings and concept drifts. Attention! If the parameter train_in_background is True, the drift_detector must have a warning tracker.

    • train_in_background

      Type \u2192 bool

      Default \u2192 True

      Parameter to determine if a background model will be used.

    "},{"location":"api/drift/DriftRetrainingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import drift\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Elec2().take(3000)\n\nmodel = drift.DriftRetrainingClassifier(\n    model=tree.HoeffdingTreeClassifier(),\n    drift_detector=drift.binary.DDM()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 86.46%\n

    "},{"location":"api/drift/DriftRetrainingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/drift/DummyDriftDetector/","title":"DummyDriftDetector","text":"

    Baseline drift detector that generates pseudo drift detection signals.

    There are two approaches 1:

    • fixed where the drift signal is generated every t_0 samples.

    • random corresponds to a pseudo-random drift detection strategy.

    "},{"location":"api/drift/DummyDriftDetector/#parameters","title":"Parameters","text":"
    • trigger_method

      Type \u2192 str

      Default \u2192 fixed

      The trigger method to use. * fixed * random

    • t_0

      Type \u2192 int

      Default \u2192 300

      Reference point to define triggers.

    • w

      Type \u2192 int

      Default \u2192 0

      Auxiliary parameter whose purpose is twofold. If trigger_method=\"fixed\", the periodic drift signals only start after an initial warm-up period randomly defined between [0, w]; this is useful to avoid all ensemble members being reset at the same time when periodic triggers are used as the adaptation strategy. If trigger_method=\"random\", w defines the probability bounds of triggering a drift: the chance of triggering a drift is \\(0.5\\) after observing t_0 instances and approaches \\(1\\) after monitoring t_0 + w / 2 instances. A sigmoid function is used to produce values between [0, 1] that are used as the reset probabilities (a sketch follows this parameter list).

    • dynamic_cloning

      Type \u2192 bool

      Default \u2192 False

      Whether to change the seed and w values each time clone() is called.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.
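
    As a concrete illustration of the random trigger, here is a minimal sketch of a reset-probability sigmoid matching the description of w above. The exact constants are an assumption, not necessarily river's implementation:

    import math

    # Hypothetical reset-probability sigmoid for trigger_method="random":
    # probability 0.5 at t_0, approaching 1 around t_0 + w / 2.
    def reset_probability(t, t_0=500, w=100):
        return 1 / (1 + math.exp(-8 * (t - t_0) / w))

    print(round(reset_probability(500), 2))  # 0.5 at t_0
    print(round(reset_probability(550), 2))  # ~0.98 near t_0 + w / 2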

    "},{"location":"api/drift/DummyDriftDetector/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/DummyDriftDetector/#examples","title":"Examples","text":"
    import random\nfrom river import drift\n\nrng = random.Random(42)\n

    The observed values will not affect the periodic triggers.

    data = [rng.gauss(0, 1) for _ in range(1000)]\n

    Let's start with the fixed drift signals:

    ptrigger = drift.DummyDriftDetector(t_0=500, seed=42)\nfor i, v in enumerate(data):\n    _ = ptrigger.update(v)\n    if ptrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 499.\nDrift detected at instance 999.\n

    Now, the random drift signals:

    rtrigger = drift.DummyDriftDetector(\n    trigger_method=\"random\",\n    t_0=500,\n    w=100,\n    dynamic_cloning=True,\n    seed=42\n)\nfor i, v in enumerate(data):\n    _ = rtrigger.update(v)\n    if rtrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 368.\nDrift detected at instance 817.\n

    Remember to set a w > 0 value if random triggers are used:

    try:\n    drift.DummyDriftDetector(trigger_method=\"random\")\nexcept ValueError as ve:\n    print(ve)\n
    The 'w' value must be greater than zero when 'trigger_method' is 'random'.\n

    Since we set dynamic_cloning to True, a clone of the periodic trigger will have its internal parameters changed:

    rtrigger = rtrigger.clone()\nfor i, v in enumerate(data):\n    _ = rtrigger.update(v)\n    if rtrigger.drift_detected:\n        print(f\"Drift detected at instance {i}.\")\n
    Drift detected at instance 429.\nDrift detected at instance 728.\n

    "},{"location":"api/drift/DummyDriftDetector/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self

    "},{"location":"api/drift/DummyDriftDetector/#notes","title":"Notes","text":"

    When used in ensembles, a naive implementation of periodic drift signals would make all ensemble members reset at the same time. To avoid that, the dynamic_cloning parameter can be set to True. In this case, every time the clone method of this detector is called in an ensemble, a new seed is defined. If dynamic_cloning=True and trigger_method=\"fixed\", a new w between [0, t_0] will also be created for the new cloned instance.

    1. Heitor Gomes, Jacob Montiel, Saulo Martiello Mastelini, Bernhard Pfahringer, and Albert Bifet. On Ensemble Techniques for Data Stream Regression. IJCNN'20. International Joint Conference on Neural Networks. 2020.\u00a0\u21a9

    "},{"location":"api/drift/KSWIN/","title":"KSWIN","text":"

    Kolmogorov-Smirnov Windowing method for concept drift detection.

    "},{"location":"api/drift/KSWIN/#parameters","title":"Parameters","text":"
    • alpha

      Type \u2192 float

      Default \u2192 0.005

      Probability for the test statistic of the Kolmogorov-Smirnov test. The alpha parameter is very sensitive and should therefore be set below 0.01.

    • window_size

      Type \u2192 int

      Default \u2192 100

      Size of the sliding window.

    • stat_size

      Type \u2192 int

      Default \u2192 30

      Size of the statistic window.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    • window

      Type \u2192 typing.Iterable | None

      Default \u2192 None

      Already collected data to avoid cold start.

    "},{"location":"api/drift/KSWIN/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/KSWIN/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nkswin = drift.KSWIN(alpha=0.0001, seed=42)\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = kswin.update(val)\n    if kswin.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1016, input value: 6\n

    "},{"location":"api/drift/KSWIN/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Adds an element on top of the sliding window and removes the oldest one from the window. Afterwards, the KS-test is performed.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self

    "},{"location":"api/drift/KSWIN/#notes","title":"Notes","text":"

    KSWIN (Kolmogorov-Smirnov Windowing) is a concept change detection method based on the Kolmogorov-Smirnov (KS) statistical test. The KS-test is a statistical test that makes no assumption about the underlying data distribution. KSWIN can monitor data or performance distributions. Note that the detector accepts one-dimensional input.

    KSWIN maintains a sliding window \\(\\Psi\\) of fixed size \\(n\\) (window_size). The last \\(r\\) (stat_size) samples of \\(\\Psi\\) are assumed to represent the most recent concept, \\(R\\). From the first \\(n-r\\) samples of \\(\\Psi\\), \\(r\\) samples are uniformly drawn, representing an approximation of the older concept, \\(W\\).

    The KS-test is performed on the windows \\(R\\) and \\(W\\), which have the same size. The KS-test compares the distance between the empirical cumulative data distributions, \\(dist(R,W)\\).

    A concept drift is detected by KSWIN if:

    \\[ dist(R,W) > \\sqrt{-\\frac{ln\\alpha}{r}} \\]

    That is, a drift is signaled when the distance between the empirical data distributions of \\(R\\) and \\(W\\) is too large for both windows to come from the same distribution.
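
    As an illustration, the decision rule can be sketched with scipy's two-sample KS test (assuming scipy is available; the windows below are synthetic):

    import math
    import random

    from scipy import stats

    # R holds the r most recent points, W is a uniform sample of r older
    # points; a drift is flagged when the KS distance exceeds
    # sqrt(-ln(alpha) / r).
    rng = random.Random(42)
    alpha, n, r = 0.005, 100, 30
    psi = [rng.gauss(0, 1) for _ in range(n - r)] + [rng.gauss(3, 1) for _ in range(r)]
    R = psi[-r:]
    W = rng.sample(psi[: n - r], r)
    dist = stats.ks_2samp(R, W).statistic
    print(dist > math.sqrt(-math.log(alpha) / r))  # True: drift detected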

    1. Christoph Raab, Moritz Heusinger, Frank-Michael Schleif, Reactive Soft Prototype Computing for Concept Drift Streams, Neurocomputing, 2020,\u00a0\u21a9

    "},{"location":"api/drift/PageHinkley/","title":"PageHinkley","text":"

    Page-Hinkley method for concept drift detection.

    This change detection method works by tracking the observed values and their running mean up to the current moment. Page-Hinkley does not signal warning zones, only change detections.

    This detector implements the CUSUM control chart for detecting changes. This implementation also supports the two-sided Page-Hinkley test to detect increasing and decreasing changes in the mean of the input values.
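
    A simplified one-sided version of the test can be sketched as follows (illustrative, not river's implementation):

    # Accumulate the deviations of each value from the running mean (minus a
    # tolerance `delta`) and flag a change when the cumulative sum rises more
    # than `threshold` above its minimum.
    def page_hinkley(stream, delta=0.005, threshold=50.0):
        mean, cum, min_cum = 0.0, 0.0, 0.0
        for t, x in enumerate(stream, start=1):
            mean += (x - mean) / t        # running mean
            cum += x - mean - delta       # cumulative deviation
            min_cum = min(min_cum, cum)
            if cum - min_cum > threshold: # increase in the mean detected
                return t
        return None

    print(page_hinkley([0] * 500 + [5] * 500))  # detects shortly after the shift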

    "},{"location":"api/drift/PageHinkley/#parameters","title":"Parameters","text":"
    • min_instances

      Type \u2192 int

      Default \u2192 30

      The minimum number of instances before detecting change.

    • delta

      Type \u2192 float

      Default \u2192 0.005

      The delta factor for the Page-Hinkley test.

    • threshold

      Type \u2192 float

      Default \u2192 50.0

      The change detection threshold (lambda).

    • alpha

      Type \u2192 float

      Default \u2192 0.9999

      The forgetting factor, used to weight the observed value and the mean.

    • mode

      Type \u2192 str

      Default \u2192 both

      Whether to consider increases (\"up\"), decreases (\"down\") or both (\"both\") when monitoring the fading mean.

    "},{"location":"api/drift/PageHinkley/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    "},{"location":"api/drift/PageHinkley/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(12345)\nph = drift.PageHinkley()\n\ndata_stream = rng.choices([0, 1], k=1000) + rng.choices(range(4, 8), k=1000)\n\nfor i, val in enumerate(data_stream):\n    _ = ph.update(val)\n    if ph.drift_detected:\n        print(f\"Change detected at index {i}, input value: {val}\")\n
    Change detected at index 1006, input value: 5\n

    "},{"location":"api/drift/PageHinkley/#methods","title":"Methods","text":"update

    Update the detector with a single data point.

    Parameters

    • x \u2014 'int | float'

    Returns

    DriftDetector: self

    1. E. S. Page. 1954. Continuous Inspection Schemes. Biometrika 41, 1/2 (1954), 100-115.\u00a0\u21a9

    2. Sebasti\u00e3o, R., & Fernandes, J. M. (2017, June). Supporting the Page-Hinkley test with empirical mode decomposition for change detection. In International Symposium on Methodologies for Intelligent Systems (pp. 492-498). Springer, Cham.\u00a0\u21a9

    "},{"location":"api/drift/binary/DDM/","title":"DDM","text":"

    Drift Detection Method.

    DDM (Drift Detection Method) is a concept change detection method based on the PAC learning model premise that the learner's error rate will decrease as the number of analysed samples increases, as long as the data distribution is stationary.

    If the algorithm detects an increase in the error rate that surpasses a calculated threshold, either a change is detected, or the algorithm warns the user that a change may occur in the near future, which is called the warning zone.

    The detection threshold is calculated as a function of two statistics, obtained when \\((p_i + s_i)\\) is at its minimum:

    • \\(p_{min}\\): The minimum recorded error rate.

    • \\(s_{min}\\): The minimum recorded standard deviation.

    At instant \\(i\\), the detection algorithm uses:

    • \\(p_i\\): The error rate at instant \\(i\\).

    • \\(s_i\\): The standard deviation at instant \\(i\\).

    The conditions for entering the warning zone and detecting change are as follows [see implementation note below]:

    • if \\(p_i + s_i \\geq p_{min} + w_l * s_{min}\\) -> Warning zone

    • if \\(p_i + s_i \\geq p_{min} + d_l * s_{min}\\) -> Change detected

    In the above expressions, \\(w_l\\) and \\(d_l\\) represent, respectively, the warning and drift thresholds.
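
    A simplified sketch of these statistics and the drift check (illustrative; warning handling and other details are omitted):

    import math
    import random

    # Track the running error rate p_i and the binomial standard deviation
    # s_i, remember the minimum of p + s, and flag a drift once
    # p_i + s_i >= p_min + drift_threshold * s_min.
    def ddm_drift_index(errors, drift_threshold=3.0, warm_start=30):
        n, p = 0, 0.0
        p_min, s_min = float("inf"), float("inf")
        for x in errors:  # x is 1 for an error, 0 for a correct prediction
            n += 1
            p += (x - p) / n
            s = math.sqrt(p * (1 - p) / n)
            if n >= warm_start:
                if p + s < p_min + s_min:
                    p_min, s_min = p, s
                if p + s >= p_min + drift_threshold * s_min:
                    return n
        return None

    rng = random.Random(42)
    errors = rng.choices([0, 1], weights=[0.8, 0.2], k=500)
    errors += rng.choices([0, 1], weights=[0.4, 0.6], k=500)
    print(ddm_drift_index(errors))  # index where the error-rate increase is flagged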

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    "},{"location":"api/drift/binary/DDM/#parameters","title":"Parameters","text":"
    • warm_start

      Type \u2192 int

      Default \u2192 30

      The minimum required number of analyzed samples so change can be detected. Warm start parameter for the drift detector.

    • warning_threshold

      Type \u2192 float

      Default \u2192 2.0

      Threshold to decide if the detector is in a warning zone. The default value corresponds to a 95% confidence level for the warning assessment.

    • drift_threshold

      Type \u2192 float

      Default \u2192 3.0

      Threshold to decide if a drift was detected. The default value corresponds to a 99% confidence level for the drift assessment.

    "},{"location":"api/drift/binary/DDM/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/DDM/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nddm = drift.binary.DDM()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = ddm.update(x)\n    if ddm.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if ddm.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 1084\nChange detected at index 1334\nWarning detected at index 1492\n

    "},{"location":"api/drift/binary/DDM/#methods","title":"Methods","text":"update

    Update the detector with a single boolean input.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Jo\u00e3o Gama, Pedro Medas, Gladys Castillo, Pedro Pereira Rodrigues: Learning with Drift Detection. SBIA 2004: 286-295\u00a0\u21a9

    "},{"location":"api/drift/binary/EDDM/","title":"EDDM","text":"

    Early Drift Detection Method.

    EDDM (Early Drift Detection Method) aims to improve the detection rate of gradual concept drift in DDM, while keeping a good performance against abrupt concept drift.

    This method works by keeping track of the average distance between two errors instead of only the error rate. For this, it is necessary to keep track of the running average distance and the running standard deviation, as well as the maximum distance and the maximum standard deviation.

    The algorithm works similarly to the DDM algorithm, by keeping track of statistics only. It works with the running average distance (\\(p_i'\\)) and the running standard deviation (\\(s_i'\\)), as well as \\(p'_{max}\\) and \\(s'_{max}\\), which are the values of \\(p_i'\\) and \\(s_i'\\) when \\((p_i' + 2 * s_i')\\) reaches its maximum.

    Like DDM, there are two threshold values that define the borderline between no change, warning zone, and drift detected. These are as follows:

    • if \\((p_i' + 2 * s_i') / (p'_{max} + 2 * s'_{max}) < \\alpha\\) -> Warning zone

    • if \\((p_i' + 2 * s_i') / (p'_{max} + 2 * s'_{max}) < \\beta\\) -> Change detected

    \\(\\alpha\\) and \\(\\beta\\) are set to 0.95 and 0.9, respectively.
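
    A simplified sketch of the ratio test on distances between errors (illustrative; river tracks these statistics incrementally and also handles the warning zone):

    import math
    import random

    # Track the running mean and standard deviation of the distance between
    # consecutive errors, remember the maximum of mean + 2 * std, and flag a
    # drift when the current value drops below beta times that maximum.
    def eddm_drift_index(errors, beta=0.9, warm_start=30):
        n, mean, m2 = 0, 0.0, 0.0
        last_error, max_stat = None, 0.0
        for i, x in enumerate(errors):
            if x != 1:
                continue
            if last_error is not None:
                d = i - last_error  # distance between consecutive errors
                n += 1
                prev = mean
                mean += (d - mean) / n
                m2 += (d - prev) * (d - mean)  # Welford's variance update
                std = math.sqrt(m2 / n) if n > 1 else 0.0
                stat = mean + 2 * std
                max_stat = max(max_stat, stat)
                if n >= warm_start and stat / max_stat < beta:
                    return i
            last_error = i
        return None

    rng = random.Random(42)
    stream = rng.choices([0, 1], k=1000)
    stream += rng.choices([0, 1], weights=[0.3, 0.7], k=1000)
    print(eddm_drift_index(stream, beta=0.75))  # index where the drift is flagged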

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    "},{"location":"api/drift/binary/EDDM/#parameters","title":"Parameters","text":"
    • warm_start

      Type \u2192 int

      Default \u2192 30

      The minimum required number of monitored errors/failures so change can be detected. Warm start parameter for the drift detector.

    • alpha

      Type \u2192 float

      Default \u2192 0.95

      Threshold for triggering a warning. Must be between 0 and 1. The smaller the value, the more conservative the detector becomes.

    • beta

      Type \u2192 float

      Default \u2192 0.9

      Threshold for triggering a drift. Must be between 0 and 1. The smaller the value, the more conservative the detector becomes.

    "},{"location":"api/drift/binary/EDDM/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/EDDM/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\neddm = drift.binary.EDDM(alpha=0.8, beta=0.75)\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = eddm.update(x)\n    if eddm.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if eddm.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 1059\nChange detected at index 1278\n

    "},{"location":"api/drift/binary/EDDM/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Early Drift Detection Method. Manuel Baena-Garcia, Jose Del Campo-Avila, Ra\u00fal Fidalgo, Albert Bifet, Ricard Gavalda, Rafael Morales-Bueno. In Fourth International Workshop on Knowledge Discovery from Data Streams, 2006.\u00a0\u21a9

    "},{"location":"api/drift/binary/HDDM-A/","title":"HDDM_A","text":"

    Drift Detection Method based on Hoeffding's bounds with moving average-test.

    HDDM_A is a drift detection method based on Hoeffding's inequality, which uses the input average as estimator.

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    Implementation based on MOA.

    "},{"location":"api/drift/binary/HDDM-A/#parameters","title":"Parameters","text":"
    • drift_confidence

      Default \u2192 0.001

      Confidence level for the drift detection.

    • warning_confidence

      Default \u2192 0.005

      Confidence level for the warning detection.

    • two_sided_test

      Default \u2192 False

      If True, will monitor error increments and decrements (two-sided). By default will only monitor increments (one-sided).

    "},{"location":"api/drift/binary/HDDM-A/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/HDDM-A/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nhddm_a = drift.binary.HDDM_A()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = hddm_a.update(x)\n    if hddm_a.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if hddm_a.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 451\nChange detected at index 1206\n

    "},{"location":"api/drift/binary/HDDM-A/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Fr\u00edas-Blanco I, del Campo-\u00c1vila J, Ramos-Jimenez G, et al. Online and non-parametric drift detection methods based on Hoeffding's bounds. IEEE Transactions on Knowledge and Data Engineering, 2014, 27(3): 810-823.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/drift/binary/HDDM-W/","title":"HDDM_W","text":"

    Drift Detection Method based on Hoeffding's bounds with moving weighted average-test.

    HDDM_W is an online drift detection method based on McDiarmid's bounds. HDDM_W uses the Exponentially Weighted Moving Average (EWMA) statistic as estimator.
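
    For reference, the EWMA statistic that HDDM_W monitors can be sketched as follows (the McDiarmid-style bound test on this statistic is omitted for brevity):

    # Exponentially Weighted Moving Average: lambda_val controls how much
    # weight is given to recent data.
    def ewma(stream, lambda_val=0.05):
        z, out = 0.0, []
        for x in stream:
            z = (1 - lambda_val) * z + lambda_val * x
            out.append(z)
        return out

    print([round(z, 3) for z in ewma([1, 0, 1, 1, 0])])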

    Input: x is an entry in a stream of bits, where 1 indicates error/failure and 0 represents correct/normal values.

    For example, if a classifier's prediction \\(y'\\) is right or wrong w.r.t. the true target label \\(y\\):

    • 0: Correct, \\(y=y'\\)

    • 1: Error, \\(y \\neq y'\\)

    Implementation based on MOA.

    "},{"location":"api/drift/binary/HDDM-W/#parameters","title":"Parameters","text":"
    • drift_confidence

      Default \u2192 0.001

      Confidence level for the drift detection.

    • warning_confidence

      Default \u2192 0.005

      Confidence level for the warning detection.

    • lambda_val

      Default \u2192 0.05

      The weight given to recent data. Smaller values mean less weight given to recent data.

    • two_sided_test

      Default \u2192 False

      If True, will monitor error increments and decrements (two-sided). By default will only monitor increments (one-sided).

    "},{"location":"api/drift/binary/HDDM-W/#attributes","title":"Attributes","text":"
    • drift_detected

      Whether or not a drift is detected following the last update.

    • warning_detected

      Whether or not a warning is detected following the last update.

    "},{"location":"api/drift/binary/HDDM-W/#examples","title":"Examples","text":"

    import random\nfrom river import drift\n\nrng = random.Random(42)\nhddm_w = drift.binary.HDDM_W()\n\ndata_stream = rng.choices([0, 1], k=1000)\ndata_stream = data_stream + rng.choices([0, 1], k=1000, weights=[0.3, 0.7])\n\nprint_warning = True\nfor i, x in enumerate(data_stream):\n    _ = hddm_w.update(x)\n    if hddm_w.warning_detected and print_warning:\n        print(f\"Warning detected at index {i}\")\n        print_warning = False\n    if hddm_w.drift_detected:\n        print(f\"Change detected at index {i}\")\n        print_warning = True\n
    Warning detected at index 451\nChange detected at index 1077\n

    "},{"location":"api/drift/binary/HDDM-W/#methods","title":"Methods","text":"update

    Update the change detector with a single data point.

    Parameters

    • x \u2014 'bool'

    Returns

    BinaryDriftDetector: self

    1. Fr\u00edas-Blanco I, del Campo-\u00c1vila J, Ramos-Jimenez G, et al. Online and non-parametric drift detection methods based on Hoeffding\u2019s bounds. IEEE Transactions on Knowledge and Data Engineering, 2014, 27(3): 810-823.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/drift/datasets/AirlinePassengers/","title":"AirlinePassengers","text":"

    JFK Airline Passengers

    This dataset gives the number of passengers arriving and departing at JFK. The data is obtained from New York State's official Kaggle page for this dataset.

    "},{"location":"api/drift/datasets/AirlinePassengers/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/AirlinePassengers/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.kaggle.com/new-york-state/nys-air-passenger-traffic,-port-authority-of-ny-nj#air-passenger-traffic-per-month-port-authority-of-ny-nj-beginning-1977.csv\u00a0\u21a9

    "},{"location":"api/drift/datasets/Apple/","title":"Apple","text":"

    Apple Stock

    This dataset concerns the daily close price and volume of Apple stock around the year 2000. The dataset is sampled every 3 observations to reduce the length of the time series. This dataset is retrieved from Yahoo Finance.

    "},{"location":"api/drift/datasets/Apple/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Apple/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://finance.yahoo.com/quote/AAPL/history?period1=850348800&period2=1084579200&interval=1d&filter=history&frequency=1d\u00a0\u21a9

    "},{"location":"api/drift/datasets/Bitcoin/","title":"Bitcoin","text":"

    Bitcoin Market Price

    This is a regression task, where the goal is to predict the average USD market price across major bitcoin exchanges. This data was collected from the official Blockchain website. There is only one feature given, the day of exchange, which is in increments of three. The first 500 lines have been removed because they are not interesting.

    "},{"location":"api/drift/datasets/Bitcoin/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Bitcoin/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.blockchain.com/fr/explorer/charts/market-price?timespan=all\u00a0\u21a9

    "},{"location":"api/drift/datasets/BrentSpotPrice/","title":"BrentSpotPrice","text":"

    Brent Spot Price

    This is the USD price for Brent Crude oil, measured daily. We include the time series from 2000 onwards. The data is sampled at every 10 original observations to reduce the length of the series.

    The data is obtained from the U.S. Energy Information Administration. Since the data is in the public domain, we distribute it as part of this repository.

    Since the original data has observations only on trading days, there are arguably gaps in this time series (on non-trading days). However, we consider these observations to be consecutive, and thus also consider the sampled time series to have consecutive observations.

    "},{"location":"api/drift/datasets/BrentSpotPrice/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/BrentSpotPrice/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. U.S. Energy Information Administration (Sep. 2019)\u00a0\u21a9

    2. https://www.eia.gov/opendata/v1/qb.php?sdid=PET.RBRTE.D\u00a0\u21a9

    "},{"location":"api/drift/datasets/Occupancy/","title":"Occupancy","text":"

    Room occupancy data.

    Dataset on detecting room occupancy based on several variables. The dataset contains temperature, humidity, light, and CO2 variables.

    The data is sampled at every 16 observations to reduce the length of the series.

    "},{"location":"api/drift/datasets/Occupancy/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/Occupancy/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    Candanedo, Luis M., and V\u00e9ronique Feldheim. \"Accurate occupancy detection of an office room from light, temperature, humidity and CO2 measurements using statistical learning models.\" Energy and Buildings 112 (2016): 28-39.

    "},{"location":"api/drift/datasets/RunLog/","title":"RunLog","text":"

    Interval Training Running Pace.

    This dataset shows the pace of a runner during an interval training session, where a mobile application provides instructions on when to run and when to walk.

    "},{"location":"api/drift/datasets/RunLog/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/RunLog/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    "},{"location":"api/drift/datasets/UKCoalEmploy/","title":"UKCoalEmploy","text":"

    Historic Employment in UK Coal Mines

    This is historic data obtained from the UK government. We use the employment column for the number of workers employed in the British coal mines. Missing values in the data are replaced with the value of the preceding year.

    "},{"location":"api/drift/datasets/UKCoalEmploy/#attributes","title":"Attributes","text":"
    • desc

      Return the description from the docstring.

    • path

    "},{"location":"api/drift/datasets/UKCoalEmploy/#methods","title":"Methods","text":"take

    Iterate over the k samples.

    Parameters

    • k \u2014 'int'

    1. https://www.gov.uk/government/statistical-data-sets/historical-coal-data-coal-production-availability-and-consumption\u00a0\u21a9

    "},{"location":"api/dummy/NoChangeClassifier/","title":"NoChangeClassifier","text":"

    Dummy classifier which returns the last class seen.

    The predict_one method will output the last class seen whilst predict_proba_one will return 1 for the last class seen and 0 for the others.

    "},{"location":"api/dummy/NoChangeClassifier/#attributes","title":"Attributes","text":"
    • last_class

      The last class seen.

    • classes

      The set of classes seen.

    "},{"location":"api/dummy/NoChangeClassifier/#examples","title":"Examples","text":"

    Taken from example 2.1 of this page.

    import pprint\nfrom river import dummy\n\nsentences = [\n    ('glad happy glad', '+'),\n    ('glad glad joyful', '+'),\n    ('glad pleasant', '+'),\n    ('miserable sad glad', '\u2212')\n]\n\nmodel = dummy.NoChangeClassifier()\n\nfor sentence, label in sentences:\n    model = model.learn_one(sentence, label)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    '\u2212'\n

    pprint.pprint(model.predict_proba_one(new_sentence))\n
    {'+': 0, '\u2212': 1}\n

    "},{"location":"api/dummy/NoChangeClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/dummy/PriorClassifier/","title":"PriorClassifier","text":"

    Dummy classifier which uses the prior distribution.

    The predict_one method will output the most common class whilst predict_proba_one will return the normalized class counts.

    "},{"location":"api/dummy/PriorClassifier/#attributes","title":"Attributes","text":"
    • counts (collections.Counter)

      Class counts.

    • n (int)

      Total number of seen instances.

    "},{"location":"api/dummy/PriorClassifier/#examples","title":"Examples","text":"

    Taken from example 2.1 of this page.

    from river import dummy\n\nsentences = [\n    ('glad happy glad', '+'),\n    ('glad glad joyful', '+'),\n    ('glad pleasant', '+'),\n    ('miserable sad glad', '\u2212')\n]\n\nmodel = dummy.PriorClassifier()\n\nfor sentence, label in sentences:\n    model = model.learn_one(sentence, label)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    '+'\n
    model.predict_proba_one(new_sentence)\n
    {'+': 0.75, '\u2212': 0.25}\n

    "},{"location":"api/dummy/PriorClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Krichevsky\u2013Trofimov estimator

    "},{"location":"api/dummy/StatisticRegressor/","title":"StatisticRegressor","text":"

    Dummy regressor that uses a univariate statistic to make predictions.

    "},{"location":"api/dummy/StatisticRegressor/#parameters","title":"Parameters","text":"
    • statistic

      Type \u2192 stats.base.Univariate

    "},{"location":"api/dummy/StatisticRegressor/#examples","title":"Examples","text":"

    from pprint import pprint\nfrom river import dummy\nfrom river import stats\n\nsentences = [\n    ('glad happy glad', 3),\n    ('glad glad joyful', 3),\n    ('glad pleasant', 2),\n    ('miserable sad glad', -3)\n]\n\nmodel = dummy.StatisticRegressor(stats.Mean())\n\nfor sentence, score in sentences:\n    model = model.learn_one(sentence, score)\n\nnew_sentence = 'glad sad miserable pleasant glad'\nmodel.predict_one(new_sentence)\n
    1.25\n

    "},{"location":"api/dummy/StatisticRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/ensemble/ADWINBaggingClassifier/","title":"ADWINBaggingClassifier","text":"

    ADWIN Bagging classifier.

    ADWIN Bagging 1 is the online bagging method of Oza and Russell 2 with the addition of the ADWIN algorithm as a change detector. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.
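
    The drift-handling step can be sketched as follows (an illustration of the idea with a hypothetical ensemble state, not river's internal code; training of the members is omitted):

    from river import drift\nfrom river import linear_model\n\nmodels = [linear_model.LogisticRegression() for _ in range(3)]\ndetectors = [drift.ADWIN() for _ in range(3)]\n\ndef handle_drift(x, y):\n    for model, detector in zip(models, detectors):\n        detector.update(model.predict_one(x) != y)  # feed each member's 0/1 error\n    if any(detector.drift_detected for detector in detectors):\n        # replace the member with the highest error estimated by its ADWIN\n        worst = max(range(len(models)), key=lambda i: detectors[i].estimation)\n        models[worst] = linear_model.LogisticRegression()\n        detectors[worst] = drift.ADWIN()\n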

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/ADWINBaggingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.ADWINBaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.65%\n

    "},{"location":"api/ensemble/ADWINBaggingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    1. Albert Bifet, Geoff Holmes, Bernhard Pfahringer, Richard Kirkby, and Ricard Gavald\u00e0. \"New ensemble methods for evolving data streams.\" In 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2009.

    2. Oza, N., Russell, S. \"Online bagging and boosting.\" In: Artificial Intelligence and Statistics 2001, pp. 105\u2013112. Morgan Kaufmann, 2001.

    "},{"location":"api/ensemble/ADWINBoostingClassifier/","title":"ADWINBoostingClassifier","text":"

    ADWIN Boosting classifier.

    ADWIN Boosting 1 is the online boosting method of Oza and Russell 2 with the addition of the ADWIN algorithm as a change detector. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/ADWINBoostingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\nmodel = ensemble.ADWINBoostingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.61%\n

    "},{"location":"api/ensemble/ADWINBoostingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Albert Bifet, Geoff Holmes, Bernhard Pfahringer, Richard Kirkby, and Ricard Gavald\u00e0. \"New ensemble methods for evolving data streams.\" In 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2009.

    2. Oza, N., Russell, S. \"Online bagging and boosting.\" In: Artificial Intelligence and Statistics 2001, pp. 105\u2013112. Morgan Kaufmann, 2001.

    "},{"location":"api/ensemble/AdaBoostClassifier/","title":"AdaBoostClassifier","text":"

    Boosting for classification.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution of parameter lambda. The lambda parameter is updated as the weak learners successively fit the same observation.
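
    A rough sketch of this lambda update (Oza-style online boosting; models, correct and wrong stand for hypothetical ensemble state, and x, y for the incoming observation; this is not river's exact code):

    import math\nimport random\n\ndef poisson(lam):\n    # Knuth's method for sampling from a Poisson(lam) distribution\n    threshold, k, p = math.exp(-lam), 0, 1.0\n    while p > threshold:\n        k += 1\n        p *= random.random()\n    return k - 1\n\ndef boost_one(models, correct, wrong, x, y):\n    lam = 1.0\n    for i, model in enumerate(models):\n        for _ in range(poisson(lam)):\n            model.learn_one(x, y)\n        if model.predict_one(x) == y:\n            correct[i] += lam\n            lam *= (correct[i] + wrong[i]) / (2 * correct[i])\n        else:\n            wrong[i] += lam\n            lam *= (correct[i] + wrong[i]) / (2 * wrong[i])\n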

    "},{"location":"api/ensemble/AdaBoostClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/AdaBoostClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/AdaBoostClassifier/#examples","title":"Examples","text":"

    In the following example three tree classifiers are boosted together. The performance is slightly better than when using a single tree.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Phishing()\n\nmetric = metrics.LogLoss()\n\nmodel = ensemble.AdaBoostClassifier(\n    model=(\n        tree.HoeffdingTreeClassifier(\n            split_criterion='gini',\n            delta=1e-5,\n            grace_period=2000\n        )\n    ),\n    n_models=5,\n    seed=42\n)\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.370805\n

    print(model)\n
    AdaBoostClassifier(HoeffdingTreeClassifier)\n

    "},{"location":"api/ensemble/AdaBoostClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE International Conference on Systems, Man and Cybernetics (Vol. 3, pp. 2340-2345). IEEE.

    "},{"location":"api/ensemble/BOLEClassifier/","title":"BOLEClassifier","text":"

    Boosting Online Learning Ensemble (BOLE).

    A modified version of the Oza Online Boosting algorithm 1. For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution of parameter lambda. The first model to be trained is the one with the worst correct_weight / (correct_weight + wrong_weight) ratio; see the sketch below. The worst model not yet trained receives lambda values for training from the models that incorrectly classified an instance, and the best model not yet trained receives lambda values for training from the models that correctly classified an instance. For more details, see 2.
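
    The training order described above can be sketched in one line (correct and wrong stand for hypothetical per-model weight tallies, with correct[i] + wrong[i] > 0):

    order = sorted(range(len(models)), key=lambda i: correct[i] / (correct[i] + wrong[i]))  # worst ratio first\n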

    "},{"location":"api/ensemble/BOLEClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to boost.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    • error_bound

      Default \u2192 0.5

      Error bound percentage for allowing models to vote.

    "},{"location":"api/ensemble/BOLEClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BOLEClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import drift\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Elec2().take(3000)\n\nmodel = ensemble.BOLEClassifier(\n    model=drift.DriftRetrainingClassifier(\n        model=tree.HoeffdingTreeClassifier(),\n        drift_detector=drift.binary.DDM()\n    ),\n    n_models=10,\n    seed=42\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 93.63%\n

    "},{"location":"api/ensemble/BOLEClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE International Conference on Systems, Man and Cybernetics (Vol. 3, pp. 2340-2345). IEEE.

    2. R. S. M. d. Barros, S. Garrido T. de Carvalho Santos and P. M. Gon\u00e7alves J\u00fanior, \"A Boosting-like Online Learning Ensemble,\" 2016 International Joint Conference on Neural Networks (IJCNN), 2016, pp. 1871-1878, doi: 10.1109/IJCNN.2016.7727427.

    "},{"location":"api/ensemble/BaggingClassifier/","title":"BaggingClassifier","text":"

    Online bootstrap aggregation for classification.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution of parameter 1. k thus has a 36% chance of being equal to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being equal to 3, a 1% chance of being equal to 4, etc. You can evaluate scipy.stats.poisson(1).pmf(k) to obtain more precise values.
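
    A quick numerical check of these (approximate) percentages, assuming scipy is available:

    from scipy import stats\n\nfor k in range(5):\n    print(k, round(stats.poisson(1).pmf(k), 2))\n
    0 0.37\n1 0.37\n2 0.18\n3 0.06\n4 0.02\n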

    "},{"location":"api/ensemble/BaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/BaggingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BaggingClassifier/#examples","title":"Examples","text":"

    In the following example three logistic regressions are bagged together. The performance is slightly better than when using a single logistic regression.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.BaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.65%\n

    print(model)\n
    BaggingClassifier(StandardScaler | LogisticRegression)\n

    "},{"location":"api/ensemble/BaggingClassifier/#methods","title":"Methods","text":"learn_one predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE International Conference on Systems, Man and Cybernetics (Vol. 3, pp. 2340-2345). IEEE.

    "},{"location":"api/ensemble/BaggingRegressor/","title":"BaggingRegressor","text":"

    Online bootstrap aggregation for regression.

    For each incoming observation, each model's learn_one method is called k times, where k is sampled from a Poisson distribution of parameter 1. k thus has a 36% chance of being equal to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being equal to 3, a 1% chance of being equal to 4, etc. You can evaluate scipy.stats.poisson(1).pmf(k) for more precise values.

    "},{"location":"api/ensemble/BaggingRegressor/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor

      The regressor to bag.

    • n_models

      Default \u2192 10

      The number of models in the ensemble.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/BaggingRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/BaggingRegressor/#examples","title":"Examples","text":"

    In the following example three linear regressions are bagged together. The performance is slightly better than when using a single linear regression.

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = preprocessing.StandardScaler()\nmodel |= ensemble.BaggingRegressor(\n    model=linear_model.LinearRegression(intercept_lr=0.1),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.677586\n

    "},{"location":"api/ensemble/BaggingRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Averages the predictions of each regressor.

    Parameters

    • x
    • kwargs

    1. Oza, N.C., 2005, October. Online bagging and boosting. In 2005 IEEE International Conference on Systems, Man and Cybernetics (Vol. 3, pp. 2340-2345). IEEE.

    "},{"location":"api/ensemble/EWARegressor/","title":"EWARegressor","text":"

    Exponentially Weighted Average regressor.

    "},{"location":"api/ensemble/EWARegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Regressor]

      The regressors to hedge.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function that has to be minimized. Defaults to optim.losses.Squared.

    • learning_rate

      Default \u2192 0.5

      The learning rate by which the model weights are multiplied at each iteration; see the sketch after this list.
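
    A minimal sketch of the exponentially weighted average update (illustrative only, assuming a squared loss and renormalised weights; river's implementation may differ in such details):

    import math\n\ndef ewa_predict(models, weights, x):\n    # the prediction is the weighted sum of the members' predictions\n    return sum(w * m.predict_one(x) for m, w in zip(models, weights))\n\ndef ewa_update(models, weights, x, y, learning_rate=0.5):\n    # each weight decays exponentially with its model's loss\n    losses = [(m.predict_one(x) - y) ** 2 for m in models]\n    weights = [w * math.exp(-learning_rate * loss) for w, loss in zip(weights, losses)]\n    total = sum(weights)\n    return [w / total for w in weights]\n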

    "},{"location":"api/ensemble/EWARegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/EWARegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\nfrom river import stream\n\noptimizers = [\n    optim.SGD(0.01),\n    optim.RMSProp(),\n    optim.AdaGrad()\n]\n\nfor optimizer in optimizers:\n\n    dataset = datasets.TrumpApproval()\n    metric = metrics.MAE()\n    model = (\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(\n            optimizer=optimizer,\n            intercept_lr=.1\n        )\n    )\n\n    print(optimizer, evaluate.progressive_val_score(dataset, model, metric))\n
    SGD MAE: 0.558735\nRMSProp MAE: 0.522449\nAdaGrad MAE: 0.477289\n

    dataset = datasets.TrumpApproval()\nmetric = metrics.MAE()\nhedge = (\n    preprocessing.StandardScaler() |\n    ensemble.EWARegressor(\n        [\n            linear_model.LinearRegression(optimizer=o, intercept_lr=.1)\n            for o in optimizers\n        ],\n        learning_rate=0.005\n    )\n)\n\nevaluate.progressive_val_score(dataset, hedge, metric)\n
    MAE: 0.496298\n

    "},{"location":"api/ensemble/EWARegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    learn_predict_one predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Online Learning from Experts: Weighed Majority and Hedge

    2. Wikipedia page on the multiplicative weight update method

    3. Kivinen, J. and Warmuth, M.K., 1997. Exponentiated gradient versus gradient descent for linear predictors. Information and Computation, 132(1), pp.1-63.

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/","title":"LeveragingBaggingClassifier","text":"

    Leveraging Bagging ensemble classifier.

    Leveraging Bagging [^1] is an improvement over the Oza Bagging algorithm. The bagging performance is leveraged by increasing the re-sampling. It uses a Poisson distribution to simulate the re-sampling process. To increase re-sampling it uses a higher value for the Poisson distribution's parameter w (the average number of events), 6 by default, which increases the input space diversity by attributing a different range of weights to the data samples.

    To deal with concept drift, Leveraging Bagging uses the ADWIN algorithm to monitor the performance of each member of the ensemble. If concept drift is detected, the worst member of the ensemble (based on the error estimation by ADWIN) is replaced by a new (empty) classifier.

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier to bag.

    • n_models

      Type \u2192 int

      Default \u2192 10

      The number of models in the ensemble.

    • w

      Type \u2192 float

      Default \u2192 6

      Indicates the average number of events. This is the lambda parameter of the Poisson distribution used to compute the re-sampling weight.

    • adwin_delta

      Type \u2192 float

      Default \u2192 0.002

      The delta parameter for the ADWIN change detector.

    • bagging_method

      Type \u2192 str

      Default \u2192 bag

      The bagging method to use. Can be one of the following:
      * 'bag' - Leveraging Bagging using ADWIN.
      * 'me' - Assigns \\(weight=1\\) if the sample is misclassified, otherwise \\(weight=error/(1-error)\\).
      * 'half' - Use resampling without replacement for half of the instances.
      * 'wt' - Resample without taking out all instances.
      * 'subag' - Resampling without replacement.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#attributes","title":"Attributes","text":"
    • bagging_methods

      Valid bagging_method options.

    • models

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = ensemble.LeveragingBaggingClassifier(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.55%\n

    "},{"location":"api/ensemble/LeveragingBaggingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Averages the predictions of each classifier.

    Parameters

    • x
    • kwargs

    "},{"location":"api/ensemble/SRPClassifier/","title":"SRPClassifier","text":"

    Streaming Random Patches ensemble classifier.

    The Streaming Random Patches (SRP) 1 is an ensemble method that simulates bagging or random subspaces. The default algorithm uses both bagging and random subspaces, namely Random Patches. The default base estimator is a Hoeffding Tree, but other base estimators can be used (unlike in random forest variations).

    "},{"location":"api/ensemble/SRPClassifier/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Estimator | None

      Default \u2192 None

      The base estimator.

    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of members in the ensemble.

    • subspace_size

      Type \u2192 int | float | str

      Default \u2192 0.6

      Number of features per subset for each classifier, where M is the total number of features. A negative value means M - subspace_size. Only applies when using random subspaces or random patches; see the sketch after this list.
      * If int, indicates the number of features to use. Valid range: [2, M].
      * If float, indicates the percentage of features to use. Valid range: (0., 1.].
      * 'sqrt' - sqrt(M)+1.
      * 'rmsqrt' - Residual from M-(sqrt(M)+1).

    • training_method

      Type \u2192 str

      Default \u2192 patches

      The training method to use.
      * 'subspaces' - Random subspaces.
      * 'resampling' - Resampling.
      * 'patches' - Random patches.

    • lam

      Type \u2192 int

      Default \u2192 6

      Lambda value for resampling.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detector.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detector.

    • disable_detector

      Type \u2192 str

      Default \u2192 off

      Option to disable drift detectors:
      * If 'off', detectors are enabled.
      * If 'drift', disables concept drift detection and the background learner.
      * If 'warning', disables the background learner and ensemble members are reset if drift is detected.

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 False

      If True, disables weighted voting.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    • metric

      Type \u2192 ClassificationMetric | None

      Default \u2192 None

      The metric used to track each member's performance within the ensemble. This implementation assumes that larger values are better when using weighted votes.
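
    A sketch of how the subspace_size options may map to a feature count k out of M features (an interpretation of the description above, not river's exact code):

    import math\n\ndef subset_size(subspace_size, M):\n    if isinstance(subspace_size, int):\n        # a negative int is read as M - |subspace_size|\n        return subspace_size if subspace_size > 0 else M + subspace_size\n    if isinstance(subspace_size, float):\n        return round(subspace_size * M)\n    if subspace_size == 'sqrt':\n        return round(math.sqrt(M)) + 1\n    if subspace_size == 'rmsqrt':\n        return M - (round(math.sqrt(M)) + 1)\n    raise ValueError(subspace_size)\n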

    "},{"location":"api/ensemble/SRPClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/SRPClassifier/#examples","title":"Examples","text":"

    from river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river.datasets import synth\nfrom river import tree\n\ndataset = synth.ConceptDriftStream(\n    seed=42,\n    position=500,\n    width=50\n).take(1000)\n\nbase_model = tree.HoeffdingTreeClassifier(\n    grace_period=50, delta=0.01,\n    nominal_attributes=['age', 'car', 'zipcode']\n)\nmodel = ensemble.SRPClassifier(\n    model=base_model, n_models=3, seed=42,\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 72.77%\n

    "},{"location":"api/ensemble/SRPClassifier/#methods","title":"Methods","text":"learn_one predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    reset"},{"location":"api/ensemble/SRPClassifier/#notes","title":"Notes","text":"

    This implementation uses n_models=10 as default given the impact on processing time. The optimal number of models depends on the data and resources available.

    1. Heitor Murilo Gomes, Jesse Read, Albert Bifet. Streaming Random Patches for Evolving Data Stream Classification. IEEE International Conference on Data Mining (ICDM), 2019.

    "},{"location":"api/ensemble/SRPRegressor/","title":"SRPRegressor","text":"

    Streaming Random Patches ensemble regressor.

    The Streaming Random Patches 1 ensemble method for regression trains each base learner on a subset of features and instances from the original data, namely a random patch. This strategy to enforce diverse base models is similar to the one in the random forest, yet it is not restricted to using decision trees as the base learner.

    This method is an adaptation of 2 for regression.

    "},{"location":"api/ensemble/SRPRegressor/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The base estimator.

    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of members in the ensemble.

    • subspace_size

      Type \u2192 int | float | str

      Default \u2192 0.6

      Number of features per subset for each classifier, where M is the total number of features. A negative value means M - subspace_size. Only applies when using random subspaces or random patches.
      * If int, indicates the number of features to use. Valid range: [2, M].
      * If float, indicates the percentage of features to use. Valid range: (0., 1.].
      * 'sqrt' - sqrt(M)+1.
      * 'rmsqrt' - Residual from M-(sqrt(M)+1).

    • training_method

      Type \u2192 str

      Default \u2192 patches

      The training method to use.
      * 'subspaces' - Random subspaces.
      * 'resampling' - Resampling.
      * 'patches' - Random patches.

    • lam

      Type \u2192 int

      Default \u2192 6

      Lambda value for bagging.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detector.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detector.

    • disable_detector

      Type \u2192 str

      Default \u2192 off

      Option to disable drift detectors: * If 'off', detectors are enabled. * If 'drift', disables concept drift detection and the background learner. * If 'warning', disables the background learner and ensemble members are reset if drift is detected.

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 True

      If True, disables weighted voting.

    • drift_detection_criteria

      Type \u2192 str

      Default \u2192 error

      The criterion used to track drifts.
      * 'error' - absolute error.
      * 'prediction' - predicted target values.

    • aggregation_method

      Type \u2192 str

      Default \u2192 mean

      The method used to aggregate predictions in the ensemble.
      * 'mean'
      * 'median'

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    • metric

      Type \u2192 RegressionMetric | None

      Default \u2192 None

      The metric used to track each member's performance within the ensemble.

    "},{"location":"api/ensemble/SRPRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/SRPRegressor/#examples","title":"Examples","text":"

    from river import ensemble\nfrom river import evaluate\nfrom river import metrics\nfrom river.datasets import synth\nfrom river import tree\n\ndataset = synth.FriedmanDrift(\n    drift_type='gsg',\n    position=(350, 750),\n    transition_window=200,\n    seed=42\n).take(1000)\n\nbase_model = tree.HoeffdingTreeRegressor(grace_period=50)\nmodel = ensemble.SRPRegressor(\n    model=base_model,\n    training_method=\"patches\",\n    n_models=3,\n    seed=42\n)\n\nmetric = metrics.R2()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    R2: 0.571117\n

    "},{"location":"api/ensemble/SRPRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    reset"},{"location":"api/ensemble/SRPRegressor/#notes","title":"Notes","text":"

    This implementation uses n_models=10 as default given the impact on processing time. The optimal number of models depends on the data and resources available.

    1. Heitor Gomes, Jacob Montiel, Saulo Martiello Mastelini, Bernhard Pfahringer, and Albert Bifet. On Ensemble Techniques for Data Stream Regression. IJCNN'20. International Joint Conference on Neural Networks. 2020.

    2. Heitor Murilo Gomes, Jesse Read, Albert Bifet. Streaming Random Patches for Evolving Data Stream Classification. IEEE International Conference on Data Mining (ICDM), 2019.

    "},{"location":"api/ensemble/StackingClassifier/","title":"StackingClassifier","text":"

    Stacking for binary classification.

    "},{"location":"api/ensemble/StackingClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Classifier]

    • meta_classifier

      Type \u2192 base.Classifier

    • include_features

      Default \u2192 True

      Indicates whether or not the original features should be provided to the meta-model along with the predictions from each model; see the sketch below.
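
    Conceptually, the meta-classifier sees features built along these lines (a sketch with hypothetical feature names, not river's exact internals):

    def meta_features(models, x, include_features=True):\n    meta_x = dict(x) if include_features else {}\n    for i, model in enumerate(models):\n        # hypothetical name for each base model's positive-class score\n        meta_x[f'model_{i}'] = model.predict_proba_one(x).get(True, 0.0)\n    return meta_x\n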

    "},{"location":"api/ensemble/StackingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/StackingClassifier/#examples","title":"Examples","text":"

    from river import compose\nfrom river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = compose.Pipeline(\n    ('scale', pp.StandardScaler()),\n    ('stack', ensemble.StackingClassifier(\n        [\n            lm.LogisticRegression(),\n            lm.PAClassifier(mode=1, C=0.01),\n            lm.PAClassifier(mode=2, C=0.01),\n        ],\n        meta_classifier=lm.LogisticRegression()\n    ))\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.14%\n

    "},{"location":"api/ensemble/StackingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. A Kaggler's Guide to Model Stacking in Practice

    "},{"location":"api/ensemble/VotingClassifier/","title":"VotingClassifier","text":"

    Voting classifier.

    A classification is made by aggregating the predictions of each model in the ensemble. The probabilities for each class are summed up if use_probabilities is set to True. If not, the probabilities are ignored and each prediction is weighted the same. In this case, it's important that you use an odd number of classifiers: if the number of classifiers is even, ties can occur, in which case a class is picked at random.
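
    Both aggregation modes can be sketched as follows (illustrative; models stands for a list of fitted classifiers):

    from collections import Counter\n\ndef vote(models, x, use_probabilities=True):\n    if use_probabilities:\n        totals = Counter()\n        for model in models:\n            totals.update(model.predict_proba_one(x))  # sum the class probabilities\n    else:\n        totals = Counter(model.predict_one(x) for model in models)  # one vote each\n    return max(totals, key=totals.get) if totals else None\n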

    "},{"location":"api/ensemble/VotingClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Classifier]

      The classifiers.

    • use_probabilities

      Default \u2192 True

      Whether or not to weight each prediction with its associated probability.

    "},{"location":"api/ensemble/VotingClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/ensemble/VotingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import ensemble\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import naive_bayes\nfrom river import preprocessing\nfrom river import tree\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    ensemble.VotingClassifier([\n        linear_model.LogisticRegression(),\n        tree.HoeffdingTreeClassifier(),\n        naive_bayes.GaussianNB()\n    ])\n)\n\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.94%\n

    "},{"location":"api/ensemble/VotingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/evaluate/BinaryClassificationTrack/","title":"BinaryClassificationTrack","text":"

    This track evaluates a model's performance on binary classification tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/BinaryClassificationTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/MultiClassClassificationTrack/","title":"MultiClassClassificationTrack","text":"

    This track evaluates a model's performance on multi-class classification tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/MultiClassClassificationTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/RegressionTrack/","title":"RegressionTrack","text":"

    This track evaluates a model's performance on regression tasks. These do not include synthetic datasets.

    "},{"location":"api/evaluate/RegressionTrack/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/Track/","title":"Track","text":"

    A track evaluates a model's performance.

    The following metrics are recorded:

    • Time, which should be interpreted with wisdom. Indeed, time can depend on the architecture and on local resource availability; comparison via FLOPS should be preferred.

    • The model's memory footprint.

    • The model's predictive performance on the track's dataset.

    "},{"location":"api/evaluate/Track/#parameters","title":"Parameters","text":"
    • name

      Type \u2192 str

      The name of the track.

    • datasets

      The datasets that compose the track.

    • metric

      The metric(s) used to track performance.

    "},{"location":"api/evaluate/Track/#methods","title":"Methods","text":"run"},{"location":"api/evaluate/iter-progressive-val-score/","title":"iter_progressive_val_score","text":"

    Evaluates the performance of a model on a streaming dataset and yields results.

    This does exactly the same as evaluate.progressive_val_score. The only difference is that this function returns an iterator, yielding results at every step. This can be useful if you want to have control over what you do with the results. For instance, you might want to plot the results.

    "},{"location":"api/evaluate/iter-progressive-val-score/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      The stream of observations against which the model will be evaluated.

    • model

      The model to evaluate.

    • metric

      Type \u2192 metrics.base.Metric

      The metric used to evaluate the model's predictions.

    • moment

      Type \u2192 str | typing.Callable | None

      Default \u2192 None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

      Default \u2192 None

      The amount to wait before revealing the target associated with each observation to the model. This value is expected to be able to sum with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation.

    • step

      Default \u2192 1

      Iteration number at which to yield results. This only takes into account the predictions, and not the training steps.

    • measure_time

      Default \u2192 False

      Whether or not to measure the elapsed time.

    • measure_memory

      Default \u2192 False

      Whether or not to measure the memory usage of the model.

    "},{"location":"api/evaluate/iter-progressive-val-score/#examples","title":"Examples","text":"

    Take the following model:

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    We can evaluate it on the Phishing dataset as so:

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nsteps = evaluate.iter_progressive_val_score(\n    model=model,\n    dataset=datasets.Phishing(),\n    metric=metrics.ROCAUC(),\n    step=200\n)\n\nfor step in steps:\n    print(step)\n
    {'ROCAUC': ROCAUC: 90.20%, 'Step': 200}\n{'ROCAUC': ROCAUC: 92.25%, 'Step': 400}\n{'ROCAUC': ROCAUC: 93.23%, 'Step': 600}\n{'ROCAUC': ROCAUC: 94.05%, 'Step': 800}\n{'ROCAUC': ROCAUC: 94.79%, 'Step': 1000}\n{'ROCAUC': ROCAUC: 95.07%, 'Step': 1200}\n{'ROCAUC': ROCAUC: 95.07%, 'Step': 1250}\n

    1. Beating the Hold-Out: Bounds for K-fold and Progressive Cross-Validation

    2. Grzenda, M., Gomes, H.M. and Bifet, A., 2019. Delayed labelling evaluation for data streams. Data Mining and Knowledge Discovery, pp.1-30.

    "},{"location":"api/evaluate/progressive-val-score/","title":"progressive_val_score","text":"

    Evaluates the performance of a model on a streaming dataset.

    This method is the canonical way to evaluate a model's performance. When used correctly, it allows you to exactly assess how a model would have performed in a production scenario.

    dataset is converted into a stream of questions and answers. At each step, the model is either asked to predict an observation or is updated. The target is only revealed to the model after a certain amount of time, which is determined by the delay parameter. Note that under the hood this uses the stream.simulate_qa function to go through the data in arrival order.

    By default, there is no delay, which means that the samples are processed one after the other. When there is no delay, this function essentially performs progressive validation. When there is a delay, then we refer to it as delayed progressive validation.

    It is recommended to use this method when you want to determine a model's performance on a dataset. In particular, it is advised to use the delay parameter in order to get a reliable assessment. Indeed, in a production scenario, it is often the case that ground truths are made available after a certain amount of time. By using this method, you can reproduce this scenario and therefore truthfully assess what would have been the performance of a model on a given dataset.
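
    For instance, a delay expressed as a number of observations might look as follows (an illustrative sketch; the resulting score is not reproduced here):

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n\nevaluate.progressive_val_score(\n    dataset=datasets.Phishing(),\n    model=model,\n    metric=metrics.ROCAUC(),\n    delay=10  # each label is revealed 10 observations after the prediction\n)\n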

    "},{"location":"api/evaluate/progressive-val-score/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      The stream of observations against which the model will be evaluated.

    • model

      The model to evaluate.

    • metric

      Type \u2192 metrics.base.Metric

      The metric used to evaluate the model's predictions.

    • moment

      Type \u2192 str | typing.Callable | None

      Default \u2192 None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

      Default \u2192 None

      The amount to wait before revealing the target associated with each observation to the model. This value is expected to be able to sum with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation.

    • print_every

      Default \u2192 0

      Iteration number at which to print the current metric. This only takes into account the predictions, and not the training steps.

    • show_time

      Default \u2192 False

      Whether or not to display the elapsed time.

    • show_memory

      Default \u2192 False

      Whether or not to display the memory usage of the model.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/evaluate/progressive-val-score/#examples","title":"Examples","text":"

    Take the following model:

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    We can evaluate it on the Phishing dataset as so:

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    model=model,\n    dataset=datasets.Phishing(),\n    metric=metrics.ROCAUC(),\n    print_every=200\n)\n
    [200] ROCAUC: 90.20%\n[400] ROCAUC: 92.25%\n[600] ROCAUC: 93.23%\n[800] ROCAUC: 94.05%\n[1,000] ROCAUC: 94.79%\n[1,200] ROCAUC: 95.07%\n[1,250] ROCAUC: 95.07%\nROCAUC: 95.07%\n

    We haven't specified a delay, therefore this is strictly equivalent to the following piece of code:

    model = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n\nmetric = metrics.ROCAUC()\n\nfor x, y in datasets.Phishing():\n    y_pred = model.predict_proba_one(x)\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 95.07%\n

    When print_every is specified, the current state is printed at regular intervals. Under the hood, Python's print function is used. You can pass extra keyword arguments to modify its behavior. For instance, you may use the file argument if you want to log the progress to a file of your choice.

    with open('progress.log', 'w') as f:\n    metric = evaluate.progressive_val_score(\n        model=model,\n        dataset=datasets.Phishing(),\n        metric=metrics.ROCAUC(),\n        print_every=200,\n        file=f\n    )\n\nwith open('progress.log') as f:\n    for line in f.read().splitlines():\n        print(line)\n
    [200] ROCAUC: 94.00%\n[400] ROCAUC: 94.70%\n[600] ROCAUC: 95.17%\n[800] ROCAUC: 95.42%\n[1,000] ROCAUC: 95.82%\n[1,200] ROCAUC: 96.00%\n[1,250] ROCAUC: 96.04%\n

    Note that the performance is slightly better than above because we haven't used a fresh copy of the model. Instead, we've reused the existing model which has already done a full pass on the data.

    import os; os.remove('progress.log')\n
    1. Beating the Hold-Out: Bounds for K-fold and Progressive Cross-Validation

    2. Grzenda, M., Gomes, H.M. and Bifet, A., 2019. Delayed labelling evaluation for data streams. Data Mining and Knowledge Discovery, pp.1-30.

    "},{"location":"api/facto/FFMClassifier/","title":"FFMClassifier","text":"

    Field-aware Factorization Machine for binary classification.

    The model equation is defined by:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_j} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_{j, f_{j'}}\\) is the latent vector corresponding to \\(j\\) feature for \\(f_{j'}\\) field, and \\(\\mathbf{v}_{j', f_j}\\) is the latent vector corresponding to \\(j'\\) feature for \\(f_j\\) field.

    For better efficiency, this model automatically one-hot encodes string features, considering them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].
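
    The pairwise term of this equation can be spelled out as follows (a plain-Python sketch; v stands for a hypothetical mapping from (feature, field) pairs to latent vectors):

    def field(name):\n    # the field inference rule quoted above\n    return name.split('_')[0]\n\ndef pairwise_term(x, v):\n    features = list(x)\n    total = 0.0\n    for a in range(len(features)):\n        for b in range(a + 1, len(features)):\n            j, jp = features[a], features[b]\n            dot = sum(vj * vjp for vj, vjp in zip(v[j, field(jp)], v[jp, field(j)]))\n            total += dot * x[j] * x[jp]\n    return total\n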

    "},{"location":"api/facto/FFMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, True),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, True),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, True),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, True),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, False)\n)\n\nmodel = facto.FFMClassifier(\n    n_factors=10,\n    intercept=.5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    True\n

    "},{"location":"api/facto/FFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FFM classifier.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Juan, Y., Zhuang, Y., Chin, W.S. and Lin, C.J., 2016, September. Field-aware factorization machines for CTR prediction. In Proceedings of the 10th ACM Conference on Recommender Systems (pp. 43-50).

    "},{"location":"api/facto/FFMRegressor/","title":"FFMRegressor","text":"

    Field-aware Factorization Machine for regression.

    The model equation is defined by:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_j} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_{j, f_{j'}}\\) is the latent vector corresponding to \\(j\\) feature for \\(f_{j'}\\) field, and \\(\\mathbf{v}_{j', f_j}\\) is the latent vector corresponding to \\(j'\\) feature for \\(f_j\\) field.

    For better efficiency, this model automatically one-hot encodes string features, considering them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FFMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.
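
    As mentioned in the weight_optimizer entry above, here is a minimal configuration sketch. It only relies on optim.SGD; the learning rates are arbitrary values chosen for illustration, not recommendations:

    from river import facto\nfrom river import optim\n\nmodel = facto.FFMRegressor(\n    n_factors=10,\n    weight_optimizer=optim.SGD(0.01),\n    latent_optimizer=optim.SGD(0.05),\n    intercept=5,\n    intercept_lr=.02,\n    seed=42,\n)\n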

    "},{"location":"api/facto/FFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n)\n\nmodel = facto.FFMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    5.319945\n

    report = model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n\nprint(report)\n
    Name                                       Value      Weight     Contribution\n                               Intercept    1.00000    5.23501        5.23501\n                                user_Bob    1.00000    0.11438        0.11438\n                                    time    0.14000    0.03186        0.00446\n    item_Harry Potter(time) - time(item)    0.14000    0.03153        0.00441\n             user_Bob(time) - time(user)    0.14000    0.02864        0.00401\n                       item_Harry Potter    1.00000    0.00000        0.00000\nuser_Bob(item) - item_Harry Potter(user)    1.00000   -0.04232       -0.04232\n

    "},{"location":"api/facto/FFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Juan, Y., Zhuang, Y., Chin, W.S. and Lin, C.J., 2016, September. Field-aware factorization machines for CTR prediction. In Proceedings of the 10th ACM Conference on Recommender Systems (pp. 43-50). \u21a9

    "},{"location":"api/facto/FMClassifier/","title":"FMClassifier","text":"

    Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are the latent vectors of features \\(j\\) and \\(j'\\), respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.
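
    Concretely, a categorical value contributes a binary feature named after the original feature and its value, a naming that is visible in the debug_one outputs on these pages (e.g. user_Bob). A minimal sketch of the equivalence, for illustration only:

    x = {'user': 'Bob', 'item': 'Superman'}\n\n# is handled as if it had been one-hot encoded as:\nx_encoded = {'user_Bob': 1, 'item_Superman': 1}\n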

    "},{"location":"api/facto/FMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, True),\n    ({'user': 'Alice', 'item': 'Terminator'}, True),\n    ({'user': 'Alice', 'item': 'Star Wars'}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, True),\n    ({'user': 'Bob', 'item': 'Superman'}, True),\n    ({'user': 'Bob', 'item': 'Terminator'}, True),\n    ({'user': 'Bob', 'item': 'Star Wars'}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, False)\n)\n\nmodel = facto.FMClassifier(\n    n_factors=10,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    True\n

    "},{"location":"api/facto/FMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the model.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    2. Rendle, S., 2012, May. Factorization Machines with libFM. In ACM Transactions on Intelligent Systems and Technology 3, 3, Article 57, 22 pages. \u21a9

    "},{"location":"api/facto/FMRegressor/","title":"FMRegressor","text":"

    Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are the latent vectors of features \\(j\\) and \\(j'\\), respectively.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.

    "},{"location":"api/facto/FMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/FMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = facto.FMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    5.236504\n

    report = model.debug_one({'Bob': 1, 'Harry Potter': 1})\n\nprint(report)\n
    Name                 Value      Weight     Contribution\n         Intercept    1.00000    5.23426        5.23426\nBob - Harry Potter    1.00000    0.00224        0.00224\n      Harry Potter    1.00000    0.00000        0.00000\n               Bob    1.00000    0.00000        0.00000\n

    "},{"location":"api/facto/FMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    2. Rendle, S., 2012, May. Factorization Machines with libFM. In ACM Transactions on Intelligent Systems and Technology 3, 3, Article 57, 22 pages. \u21a9

    "},{"location":"api/facto/FwFMClassifier/","title":"FwFMClassifier","text":"

    Field-weighted Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(f_j\\) and \\(f_{j'}\\) are the fields of features \\(j\\) and \\(j'\\), respectively, and \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are their latent vectors.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FwFMClassifier/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • int_weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the field pairs interaction weights.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FwFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    • interaction_weights

      The current interaction strengths of field pairs.

    "},{"location":"api/facto/FwFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, True),\n    ({'user': 'Alice', 'item': 'Terminator'}, True),\n    ({'user': 'Alice', 'item': 'Star Wars'}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, True),\n    ({'user': 'Bob', 'item': 'Superman'}, True),\n    ({'user': 'Bob', 'item': 'Terminator'}, True),\n    ({'user': 'Bob', 'item': 'Star Wars'}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, False)\n)\n\nmodel = facto.FwFMClassifier(\n    n_factors=10,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    True\n

    "},{"location":"api/facto/FwFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the model.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Junwei Pan, Jian Xu, Alfonso Lobos Ruiz, Wenliang Zhao, Shengjun Pan, Yu Sun, and Quan Lu, 2018, April. Field-weighted Factorization Machines for Click-Through Rate Prediction in Display Advertising. In Proceedings of the 2018 World Wide Web Conference on World Wide Web. International World Wide Web Conferences Steering Committee, (pp. 1349\u20131357). \u21a9

    "},{"location":"api/facto/FwFMRegressor/","title":"FwFMRegressor","text":"

    Field-weighted Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'}\\]

    Where \\(f_j\\) and \\(f_{j'}\\) are the fields of features \\(j\\) and \\(j'\\), respectively, and \\(\\mathbf{v}_j\\) and \\(\\mathbf{v}_{j'}\\) are their latent vectors.

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables. Field names are inferred from feature names by taking everything before the first underscore: feature_name.split('_')[0].

    "},{"location":"api/facto/FwFMRegressor/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • int_weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the field pairs interaction weights.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/FwFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    • interaction_weights

      The current interaction strengths of field pairs.

    "},{"location":"api/facto/FwFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter '}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = facto.FwFMRegressor(\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    model = model.learn_one(x, y)\n\nmodel.predict_one({'Bob': 1, 'Harry Potter': 1})\n
    5.236501\n

    report = model.debug_one({'Bob': 1, 'Harry Potter': 1})\n\nprint(report)\n
    Name                                    Value      Weight     Contribution\n                            Intercept    1.00000    5.23426        5.23426\nBob(Harry Potter) - Harry Potter(Bob)    1.00000    0.00224        0.00224\n                         Harry Potter    1.00000    0.00000        0.00000\n                                  Bob    1.00000    0.00000        0.00000\n

    "},{"location":"api/facto/FwFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Junwei Pan, Jian Xu, Alfonso Lobos Ruiz, Wenliang Zhao, Shengjun Pan, Yu Sun, and Quan Lu, 2018, April. Field-weighted Factorization Machines for Click-Through Rate Prediction in Display Advertising. In Proceedings of the 2018 World Wide Web Conference on World Wide Web. International World Wide Web Conferences Steering Committee, (pp. 1349\u20131357). \u21a9

    "},{"location":"api/facto/HOFMClassifier/","title":"HOFMClassifier","text":"

    Higher-Order Factorization Machine for binary classification.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right)\\]

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.
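
    The degree parameter, described in the list below, bounds the interaction order \\(d\\) in the equation above; the debug_one example on the HOFMRegressor page shows the three-way terms produced when degree=3. As a hedged sketch, setting degree=2 keeps only pairwise interactions, making the model comparable to a plain factorization machine:

    from river import facto\n\nmodel = facto.HOFMClassifier(\n    degree=2,  # pairwise interactions only\n    n_factors=10,\n    seed=42,\n)\n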

    "},{"location":"api/facto/HOFMClassifier/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 3

      Polynomial degree, i.e. the maximum order of feature interactions (the \\(d\\) in the model equation).

    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/HOFMClassifier/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/HOFMClassifier/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, True),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, True),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, True),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, False),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, True),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, True),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, True),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, False)\n)\n\nmodel = facto.HOFMClassifier(\n    degree=3,\n    n_factors=10,\n    intercept=.5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    True\n

    "},{"location":"api/facto/HOFMClassifier/#methods","title":"Methods","text":"debug_one

    Debugs the output of the model.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    "},{"location":"api/facto/HOFMRegressor/","title":"HOFMRegressor","text":"

    Higher-Order Factorization Machine for regression.

    The model equation is defined as:

    \\[\\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right)\\]

    For more efficiency, this model automatically one-hot encodes string features, treating them as categorical variables.

    "},{"location":"api/facto/HOFMRegressor/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 3

      Polynomial degree, i.e. the maximum order of feature interactions (the \\(d\\) in the model equation).

    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • weight_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the feature weights. Note that the intercept is handled separately.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • sample_normalization

      Default \u2192 False

      Whether to divide each element of x by x's L2-norm.

    • l1_weight

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0.

    • l2_weight

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • l1_latent

      Default \u2192 0.0

      Amount of L1 regularization used to push latent weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • intercept

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. An instance of optim.schedulers.Constant is used if a float is passed. No intercept will be used if this is set to 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme. Defaults to optim.initializers.Zeros().

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme. Defaults to optim.initializers.Normal(mu=.0, sigma=.1, random_state=self.random_state).

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Randomization seed used for reproducibility.

    "},{"location":"api/facto/HOFMRegressor/#attributes","title":"Attributes","text":"
    • weights

      The current weights assigned to the features.

    • latents

      The current latent weights assigned to the features.

    "},{"location":"api/facto/HOFMRegressor/#examples","title":"Examples","text":"

    from river import facto\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman', 'time': .12}, 8),\n    ({'user': 'Alice', 'item': 'Terminator', 'time': .13}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars', 'time': .14}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill', 'time': .15}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter ', 'time': .16}, 5),\n    ({'user': 'Bob', 'item': 'Superman', 'time': .13}, 8),\n    ({'user': 'Bob', 'item': 'Terminator', 'time': .12}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars', 'time': .16}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill', 'time': .10}, 2)\n)\n\nmodel = facto.HOFMRegressor(\n    degree=3,\n    n_factors=10,\n    intercept=5,\n    seed=42,\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n
    5.311745\n

    report = model.debug_one({'user': 'Bob', 'item': 'Harry Potter', 'time': .14})\n\nprint(report)\n
    Name                                  Value      Weight     Contribution\n                          Intercept    1.00000    5.23495        5.23495\n                           user_Bob    1.00000    0.11436        0.11436\n                               time    0.14000    0.03185        0.00446\n                    user_Bob - time    0.14000    0.00884        0.00124\nuser_Bob - item_Harry Potter - time    0.14000    0.00117        0.00016\n                  item_Harry Potter    1.00000    0.00000        0.00000\n           item_Harry Potter - time    0.14000   -0.00695       -0.00097\n       user_Bob - item_Harry Potter    1.00000   -0.04246       -0.04246\n

    "},{"location":"api/facto/HOFMRegressor/#methods","title":"Methods","text":"debug_one

    Debugs the output of the FM regressor.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • sample_weight \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Rendle, S., 2010, December. Factorization machines. In 2010 IEEE International Conference on Data Mining (pp. 995-1000). IEEE. \u21a9

    "},{"location":"api/feature-extraction/Agg/","title":"Agg","text":"

    Computes a streaming aggregate.

    This transformer computes an aggregate statistic, much like the groupby method from pandas, but on a streaming dataset. It makes use of the streaming statistics from the stats module.

    When learn_one is called, the running statistic how of the group given by by is updated with the value of on. Meanwhile, the output of transform_one is a single-element dictionary, where the key is the name of the aggregate and the value is the current value of the statistic for the relevant group. The key is automatically inferred from the parameters.

    Note that you can use a compose.TransformerUnion to extract many aggregate statistics in a concise manner.

    "},{"location":"api/feature-extraction/Agg/#parameters","title":"Parameters","text":"
    • on

      Type \u2192 str

      The feature on which to compute the aggregate statistic.

    • by

      Type \u2192 str | list[str] | None

      The feature by which to group the data. All the data is included in the aggregate if this is None.

    • how

      Type \u2192 stats.base.Univariate | utils.Rolling | utils.TimeRolling

      The statistic to compute.

    "},{"location":"api/feature-extraction/Agg/#attributes","title":"Attributes","text":"
    • state

      Return the current values for each group as a series.

    "},{"location":"api/feature-extraction/Agg/#examples","title":"Examples","text":"

    Consider the following dataset:

    X = [\n    {'country': 'France', 'place': 'Taco Bell', 'revenue': 42},\n    {'country': 'Sweden', 'place': 'Burger King', 'revenue': 16},\n    {'country': 'France', 'place': 'Burger King', 'revenue': 24},\n    {'country': 'Sweden', 'place': 'Taco Bell', 'revenue': 58},\n    {'country': 'Sweden', 'place': 'Burger King', 'revenue': 20},\n    {'country': 'France', 'place': 'Taco Bell', 'revenue': 50},\n    {'country': 'France', 'place': 'Burger King', 'revenue': 10},\n    {'country': 'Sweden', 'place': 'Taco Bell', 'revenue': 80}\n]\n

    As an example, we can calculate the average (how) revenue (on) for each place (by):

    from river import feature_extraction as fx\nfrom river import stats\n\nagg = fx.Agg(\n    on='revenue',\n    by='place',\n    how=stats.Mean()\n)\n\nfor x in X:\n    agg = agg.learn_one(x)\n    print(agg.transform_one(x))\n
    {'revenue_mean_by_place': 42.0}\n{'revenue_mean_by_place': 16.0}\n{'revenue_mean_by_place': 20.0}\n{'revenue_mean_by_place': 50.0}\n{'revenue_mean_by_place': 20.0}\n{'revenue_mean_by_place': 50.0}\n{'revenue_mean_by_place': 17.5}\n{'revenue_mean_by_place': 57.5}\n

    You can compute an aggregate over multiple keys by passing a list to the by argument. For instance, we can compute the maximum (how) revenue (on) per place as well as per country (by):

    agg = fx.Agg(\n    on='revenue',\n    by=['place', 'country'],\n    how=stats.Max()\n)\n\nfor x in X:\n    agg = agg.learn_one(x)\n    print(agg.transform_one(x))\n
    {'revenue_max_by_place_and_country': 42}\n{'revenue_max_by_place_and_country': 16}\n{'revenue_max_by_place_and_country': 24}\n{'revenue_max_by_place_and_country': 58}\n{'revenue_max_by_place_and_country': 20}\n{'revenue_max_by_place_and_country': 50}\n{'revenue_max_by_place_and_country': 24}\n{'revenue_max_by_place_and_country': 80}\n

    You can use a compose.TransformerUnion in order to calculate multiple aggregates in one go. It can be constructed by using the + operator:

    agg = (\n    fx.Agg(on='revenue', by='place', how=stats.Mean()) +\n    fx.Agg(on='revenue', by=['place', 'country'], how=stats.Max())\n)\n\nimport pprint\nfor x in X:\n    agg = agg.learn_one(x)\n    pprint.pprint(agg.transform_one(x))\n
    {'revenue_max_by_place_and_country': 42, 'revenue_mean_by_place': 42.0}\n{'revenue_max_by_place_and_country': 16, 'revenue_mean_by_place': 16.0}\n{'revenue_max_by_place_and_country': 24, 'revenue_mean_by_place': 20.0}\n{'revenue_max_by_place_and_country': 58, 'revenue_mean_by_place': 50.0}\n{'revenue_max_by_place_and_country': 20, 'revenue_mean_by_place': 20.0}\n{'revenue_max_by_place_and_country': 50, 'revenue_mean_by_place': 50.0}\n{'revenue_max_by_place_and_country': 24, 'revenue_mean_by_place': 17.5}\n{'revenue_max_by_place_and_country': 80, 'revenue_mean_by_place': 57.5}\n

    The state property returns a pandas.Series, which can be useful for visualizing the current state.

    agg[0].state\n
    Taco Bell      57.5\nBurger King    17.5\nName: revenue_mean_by_place, dtype: float64\n

    agg[1].state\n
    place        country\nTaco Bell    France     50\nBurger King  Sweden     20\n             France     24\nTaco Bell    Sweden     80\nName: revenue_max_by_place_and_country, dtype: int64\n

    This transformer can also be used in conjunction with utils.TimeRolling. In that case, learn_one expects a t argument, which is a timestamp that indicates when the current row was observed. For instance, we can calculate the average (how) revenue (on) for each place (by) over the last 7 days (t):

    import datetime as dt\nimport random\nimport string\nfrom river import utils\n\nagg = fx.Agg(\n    on=\"value\",\n    by=\"group\",\n    how=utils.TimeRolling(stats.Mean(), dt.timedelta(days=7))\n)\n\nfor day in range(366):\n    g = random.choice(string.ascii_lowercase)\n    x = {\n        \"group\": g,\n        \"value\": string.ascii_lowercase.index(g) + random.random(),\n    }\n    t = dt.datetime(2023, 1, 1) + dt.timedelta(days=day)\n    agg = agg.learn_one(x, t=t)\n\nlen(agg.state)\n
    26\n

    "},{"location":"api/feature-extraction/Agg/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers are stateless and don't have to do anything during the learn_one step; for this reason, the default behavior of this method is to do nothing. Transformers that do need to update some internal state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'
    • t \u2014 defaults to None

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Streaming groupbys in pandas for big datasets \u21a9

    "},{"location":"api/feature-extraction/BagOfWords/","title":"BagOfWords","text":"

    Counts tokens in sentences.

    This transformer can be used to count tokens in a given piece of text. It takes care of normalizing the text before tokenizing it. In mini-batch settings, this transformer can convert a pandas Series of text into a sparse dataframe.

    Note that the parameters are identical to those of feature_extraction.TFIDF.

    "},{"location":"api/feature-extraction/BagOfWords/#parameters","title":"Parameters","text":"
    • on

      Type \u2192 str | None

      Default \u2192 None

      The name of the feature that contains the text to vectorize. If None, then each learn_one and transform_one will assume that each x that is provided is a str, and not a dict.

    • strip_accents

      Default \u2192 True

      Whether or not to strip accent characters.

    • lowercase

      Default \u2192 True

      Whether or not to convert all characters to lowercase.

    • preprocessor

      Type \u2192 typing.Callable | None

      Default \u2192 None

      An optional preprocessing function which overrides the strip_accents and lowercase steps, while preserving the tokenizing and n-grams generation steps.

    • stop_words

      Type \u2192 set[str] | None

      Default \u2192 None

      An optional set of tokens to remove. A sketch using this parameter is given right after this list.

    • tokenizer_pattern

      Default \u2192 (?u)\\b\\w[\\w\\-]+\\b

      The tokenization pattern which is used when no tokenizer function is passed. A single capture group may optionally be specified.

    • tokenizer

      Type \u2192 typing.Callable | None

      Default \u2192 None

      A function used to convert preprocessed text into a dict of tokens. By default, a regex formula that works well in most cases is used.

    • ngram_range

      Default \u2192 (1, 1)

      The lower and upper boundary of the range n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ngram_range of (1, 1) means only unigrams, (1, 2) means unigrams and bigrams, and (2, 2) means only bigrams.
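
    As mentioned in the stop_words entry above, here is a minimal sketch of stop word removal; the expected output assumes the default lowercasing and tokenization described in this list:

    from river import feature_extraction as fx\n\nbow = fx.BagOfWords(stop_words={'this', 'is', 'the'})\n\nprint(bow.transform_one('This is the first document.'))\n
    {'first': 1, 'document': 1}\n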

    "},{"location":"api/feature-extraction/BagOfWords/#examples","title":"Examples","text":"

    By default, BagOfWords will take as input a sentence, preprocess it, tokenize the preprocessed text, and then return a collections.Counter containing the number of occurrences of each token.

    from river import feature_extraction as fx\n\ncorpus = [\n    'This is the first document.',\n    'This document is the second document.',\n    'And this is the third one.',\n    'Is this the first document?',\n]\n\nbow = fx.BagOfWords()\n\nfor sentence in corpus:\n    print(bow.transform_one(sentence))\n
    {'this': 1, 'is': 1, 'the': 1, 'first': 1, 'document': 1}\n{'this': 1, 'document': 2, 'is': 1, 'the': 1, 'second': 1}\n{'and': 1, 'this': 1, 'is': 1, 'the': 1, 'third': 1, 'one': 1}\n{'is': 1, 'this': 1, 'the': 1, 'first': 1, 'document': 1}\n

    Note that learn_one does not have to be called because BagOfWords is stateless. You can call it but it won't do anything.

    In the above example, a string is passed to transform_one. You can also indicate which field to access if the string is stored in a dictionary:

    bow = fx.BagOfWords(on='sentence')\n\nfor sentence in corpus:\n    x = {'sentence': sentence}\n    print(bow.transform_one(x))\n
    {'this': 1, 'is': 1, 'the': 1, 'first': 1, 'document': 1}\n{'this': 1, 'document': 2, 'is': 1, 'the': 1, 'second': 1}\n{'and': 1, 'this': 1, 'is': 1, 'the': 1, 'third': 1, 'one': 1}\n{'is': 1, 'this': 1, 'the': 1, 'first': 1, 'document': 1}\n

    The ngram_range parameter can be used to extract n-grams (including unigrams):

    ngrammer = fx.BagOfWords(ngram_range=(1, 2))\n\nngrams = ngrammer.transform_one('I love the smell of napalm in the morning')\nfor ngram, count in ngrams.items():\n    print(ngram, count)\n
    love 1\nthe 2\nsmell 1\nof 1\nnapalm 1\nin 1\nmorning 1\n('love', 'the') 1\n('the', 'smell') 1\n('smell', 'of') 1\n('of', 'napalm') 1\n('napalm', 'in') 1\n('in', 'the') 1\n('the', 'morning') 1\n

    BagOfWords can build a term-frequency pandas sparse dataframe with the transform_many method.

    import pandas as pd\nX = pd.Series(['Hello world', 'Hello River'], index = ['river', 'rocks'])\nbow = fx.BagOfWords()\nbow.transform_many(X=X)\n
           hello  world  river\nriver      1      1      0\nrocks      1      0      1\n

    "},{"location":"api/feature-extraction/BagOfWords/#methods","title":"Methods","text":"learn_many learn_one

    Update with a set of features x.

    Many transformers are stateless and don't have to do anything during the learn_one step; for this reason, the default behavior of this method is to do nothing. Transformers that do need to update some internal state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    process_text transform_many

    Transform a pandas Series of strings into a term-frequency pandas sparse dataframe.

    Parameters

    • X \u2014 'pd.Series'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/PolynomialExtender/","title":"PolynomialExtender","text":"

    Polynomial feature extender.

    Generate features consisting of all polynomial combinations of the features with degree less than or equal to the specified degree.

    Be aware that the number of output features scales polynomially in the number of input features and exponentially in the degree. High degrees can cause overfitting.
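
    As a quick sanity check on that growth (plain Python, not part of the River API): without the bias feature, p input features expanded up to degree d yield \\(\\binom{p+d}{d} - 1\\) output features, which matches the first example below (x, y, x*x, x*y, y*y for p=2, d=2):

    import math\n\np, d = 2, 2\nprint(math.comb(p + d, d) - 1)\n
    5\n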

    "},{"location":"api/feature-extraction/PolynomialExtender/#parameters","title":"Parameters","text":"
    • degree

      Default \u2192 2

      The maximum degree of the polynomial features.

    • interaction_only

      Default \u2192 False

      If True then only combinations that include an element at most once will be computed.

    • include_bias

      Default \u2192 False

      Whether or not to include a dummy feature which is always equal to 1.

    • bias_name

      Default \u2192 bias

      Name to give to the bias feature.

    "},{"location":"api/feature-extraction/PolynomialExtender/#examples","title":"Examples","text":"

    from river import feature_extraction as fx\n\nX = [\n    {'x': 0, 'y': 1},\n    {'x': 2, 'y': 3},\n    {'x': 4, 'y': 5}\n]\n\npoly = fx.PolynomialExtender(degree=2, include_bias=True)\nfor x in X:\n    print(poly.transform_one(x))\n
    {'x': 0, 'y': 1, 'x*x': 0, 'x*y': 0, 'y*y': 1, 'bias': 1}\n{'x': 2, 'y': 3, 'x*x': 4, 'x*y': 6, 'y*y': 9, 'bias': 1}\n{'x': 4, 'y': 5, 'x*x': 16, 'x*y': 20, 'y*y': 25, 'bias': 1}\n

    X = [\n    {'x': 0, 'y': 1, 'z': 2},\n    {'x': 2, 'y': 3, 'z': 2},\n    {'x': 4, 'y': 5, 'z': 2}\n]\n\npoly = fx.PolynomialExtender(degree=3, interaction_only=True)\nfor x in X:\n    print(poly.transform_one(x))\n
    {'x': 0, 'y': 1, 'z': 2, 'x*y': 0, 'x*z': 0, 'y*z': 2, 'x*y*z': 0}\n{'x': 2, 'y': 3, 'z': 2, 'x*y': 6, 'x*z': 4, 'y*z': 6, 'x*y*z': 12}\n{'x': 4, 'y': 5, 'z': 2, 'x*y': 20, 'x*z': 8, 'y*z': 10, 'x*y*z': 40}\n

    Polynomial features are typically used for a linear model to capture interactions between features. This may be done by setting up a pipeline, like so:

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = (\n    fx.PolynomialExtender() |\n    pp.StandardScaler() |\n    lm.LogisticRegression()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.88%\n

    "},{"location":"api/feature-extraction/PolynomialExtender/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers are stateless and don't have to do anything during the learn_one step; for this reason, the default behavior of this method is to do nothing. Transformers that do need to update some internal state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/RBFSampler/","title":"RBFSampler","text":"

    Extracts random features which approximate an RBF kernel.

    This is a powerful way to give non-linear capacity to linear classifiers. This method is also called \"random Fourier features\" in the literature.
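
    In the random Fourier features construction of Rahimi and Recht (referenced at the bottom of this page), each output feature has the form \\(\\sqrt{2/D} \\, \\cos(\\mathbf{w}_i^{\\top} \\mathbf{x} + b_i)\\), where \\(D\\) is n_components, each \\(\\mathbf{w}_i\\) is sampled from a Gaussian whose scale is governed by gamma, and each \\(b_i\\) is sampled uniformly from \\([0, 2\\pi]\\). The inner product of two such feature maps approximates the RBF kernel in expectation. These are the standard sampling details from the paper, not a claim about this implementation's internals.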

    "},{"location":"api/feature-extraction/RBFSampler/#parameters","title":"Parameters","text":"
    • gamma

      Default \u2192 1.0

      RBF kernel parameter, as in exp(-gamma * x^2).

    • n_components

      Default \u2192 100

      Number of samples per original feature. Equals the dimensionality of the computed feature space.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number seed.

    "},{"location":"api/feature-extraction/RBFSampler/#examples","title":"Examples","text":"

    from river import feature_extraction as fx\nfrom river import linear_model as lm\nfrom river import optim\nfrom river import stream\n\nX = [[0, 0], [1, 1], [1, 0], [0, 1]]\nY = [0, 0, 1, 1]\n\nmodel = lm.LogisticRegression(optimizer=optim.SGD(.1))\n\nfor x, y in stream.iter_array(X, Y):\n    model = model.learn_one(x, y)\n    y_pred = model.predict_one(x)\n    print(y, int(y_pred))\n
    0 0\n0 0\n1 0\n1 1\n

    model = (\n    fx.RBFSampler(seed=3) |\n    lm.LogisticRegression(optimizer=optim.SGD(.1))\n)\n\nfor x, y in stream.iter_array(X, Y):\n    model = model.learn_one(x, y)\n    y_pred = model.predict_one(x)\n    print(y, int(y_pred))\n
    0 0\n0 0\n1 1\n1 1\n

    "},{"location":"api/feature-extraction/RBFSampler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers are stateless and don't have to do anything during the learn_one step; for this reason, the default behavior of this method is to do nothing. Transformers that do need to update some internal state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    Returns

    dict: The transformed values.

    1. Rahimi, A. and Recht, B., 2008. Random features for large-scale kernel machines. In Advances in neural information processing systems (pp. 1177-1184). \u21a9

    "},{"location":"api/feature-extraction/TFIDF/","title":"TFIDF","text":"

    Computes TF-IDF values from sentences.

    The TF-IDF formula is the same as scikit-learn's. The only difference is the fact that the document frequencies are determined online, whereas in a batch setting they can be determined by performing an initial pass through the data.
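
    Concretely, writing \\(n\\) for the number of documents seen so far and \\(\\text{df}(t)\\) for the number of those containing term \\(t\\), each term's weight before L2 normalization follows scikit-learn's smoothed convention:

    \\[\\text{tfidf}(t, d) = \\text{tf}(t, d) \\times \\left(1 + \\ln \\frac{1 + n}{1 + \\text{df}(t)}\\right)\\]

    The outputs below are consistent with this: in the second document, 'second' has \\(\\text{tf} = 1\\), \\(n = 2\\) and \\(\\text{df} = 1\\), giving a raw weight of \\(1 + \\ln(3/2) \\approx 1.405\\), which the L2 normalization turns into the 0.469 shown.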

    Note that the parameters are identical to those of feature_extraction.BagOfWords.

    "},{"location":"api/feature-extraction/TFIDF/#parameters","title":"Parameters","text":"
    • normalize

      Default \u2192 True

      Whether or not to normalize the TF-IDF values by their L2 norm.

    • on

      Type \u2192 str | None

      Default \u2192 None

      The name of the feature that contains the text to vectorize. If None, then the input is treated as a document instead of a set of features.

    • strip_accents

      Default \u2192 True

      Whether or not to strip accent characters.

    • lowercase

      Default \u2192 True

      Whether or not to convert all characters to lowercase.

    • preprocessor

      Type \u2192 typing.Callable | None

      Default \u2192 None

      An optional preprocessing function which overrides the strip_accents and lowercase steps, while preserving the tokenizing and n-grams generation steps.

    • tokenizer

      Type \u2192 typing.Callable | None

      Default \u2192 None

      A function used to convert preprocessed text into a dict of tokens. By default, a regex formula that works well in most cases is used.

    • ngram_range

      Default \u2192 (1, 1)

      The lower and upper boundary of the range n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ngram_range of (1, 1) means only unigrams, (1, 2) means unigrams and bigrams, and (2, 2) means only bigrams. Only works if tokenizer is not set to False.

    "},{"location":"api/feature-extraction/TFIDF/#attributes","title":"Attributes","text":"
    • dfs (collections.defaultdict)

      Document counts.

    • n (int)

      Number of scanned documents.

    "},{"location":"api/feature-extraction/TFIDF/#examples","title":"Examples","text":"

    from river import feature_extraction\n\ntfidf = feature_extraction.TFIDF()\n\ncorpus = [\n    'This is the first document.',\n    'This document is the second document.',\n    'And this is the third one.',\n    'Is this the first document?',\n]\n\nfor sentence in corpus:\n    tfidf = tfidf.learn_one(sentence)\n    print(tfidf.transform_one(sentence))\n
    {'this': 0.447, 'is': 0.447, 'the': 0.447, 'first': 0.447, 'document': 0.447}\n{'this': 0.333, 'document': 0.667, 'is': 0.333, 'the': 0.333, 'second': 0.469}\n{'and': 0.497, 'this': 0.293, 'is': 0.293, 'the': 0.293, 'third': 0.497, 'one': 0.497}\n{'is': 0.384, 'this': 0.384, 'the': 0.384, 'first': 0.580, 'document': 0.469}\n

    In the above example, a string is passed to transform_one. You can also indicate which field to access if the string is stored in a dictionary:

    tfidf = feature_extraction.TFIDF(on='sentence')\n\nfor sentence in corpus:\n    x = {'sentence': sentence}\n    tfidf = tfidf.learn_one(x)\n    print(tfidf.transform_one(x))\n
    {'this': 0.447, 'is': 0.447, 'the': 0.447, 'first': 0.447, 'document': 0.447}\n{'this': 0.333, 'document': 0.667, 'is': 0.333, 'the': 0.333, 'second': 0.469}\n{'and': 0.497, 'this': 0.293, 'is': 0.293, 'the': 0.293, 'third': 0.497, 'one': 0.497}\n{'is': 0.384, 'this': 0.384, 'the': 0.384, 'first': 0.580, 'document': 0.469}\n

    "},{"location":"api/feature-extraction/TFIDF/#methods","title":"Methods","text":"learn_many learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    process_text transform_many

    Transform pandas series of string into term-frequency pandas sparse dataframe.

    Parameters

    • X \u2014 'pd.Series'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-extraction/TargetAgg/","title":"TargetAgg","text":"

    Computes a streaming aggregate of the target values.

    This transformer is identical to feature_extraction.Agg; the only difference is that it operates on the target rather than on a feature. At each step, the running statistic how of the target values within each group defined by by is updated with the current target. It is therefore a supervised transformer.

    "},{"location":"api/feature-extraction/TargetAgg/#parameters","title":"Parameters","text":"
    • by

      Type \u2192 str | list[str] | None

      The feature by which to group the target values. All the data is included in the aggregate if this is None.

    • how

      Type \u2192 stats.base.Univariate | utils.Rolling | utils.TimeRolling

      The statistic to compute.

    • target_name

      Default \u2192 y

      The target name which is used in the result.

    "},{"location":"api/feature-extraction/TargetAgg/#attributes","title":"Attributes","text":"
    • state

      Return the current values for each group as a series.

    • target_name

    "},{"location":"api/feature-extraction/TargetAgg/#examples","title":"Examples","text":"

    Consider the following dataset, where the second element of each tuple is the target:

    dataset = [\n    ({'country': 'France', 'place': 'Taco Bell'}, 42),\n    ({'country': 'Sweden', 'place': 'Burger King'}, 16),\n    ({'country': 'France', 'place': 'Burger King'}, 24),\n    ({'country': 'Sweden', 'place': 'Taco Bell'}, 58),\n    ({'country': 'Sweden', 'place': 'Burger King'}, 20),\n    ({'country': 'France', 'place': 'Taco Bell'}, 50),\n    ({'country': 'France', 'place': 'Burger King'}, 10),\n    ({'country': 'Sweden', 'place': 'Taco Bell'}, 80)\n]\n

    As an example, let's perform a target encoding of the place feature. Instead of simply updating a running average, we use a stats.BayesianMean, which allows us to incorporate some prior knowledge. This makes subsequent models less prone to overfitting: it dampens the influence of groups for which only a few samples have been seen.

    from river import feature_extraction\nfrom river import stats\n\nagg = feature_extraction.TargetAgg(\n    by='place',\n    how=stats.BayesianMean(\n        prior=3,\n        prior_weight=1\n    )\n)\n\nfor x, y in dataset:\n    print(agg.transform_one(x))\n    agg = agg.learn_one(x, y)\n
    {'y_bayes_mean_by_place': 3.0}\n{'y_bayes_mean_by_place': 3.0}\n{'y_bayes_mean_by_place': 9.5}\n{'y_bayes_mean_by_place': 22.5}\n{'y_bayes_mean_by_place': 14.333}\n{'y_bayes_mean_by_place': 34.333}\n{'y_bayes_mean_by_place': 15.75}\n{'y_bayes_mean_by_place': 38.25}\n

    Just like with feature_extraction.Agg, we can specify multiple features on which to group the data:

    agg = feature_extraction.TargetAgg(\n    by=['place', 'country'],\n    how=stats.BayesianMean(\n        prior=3,\n        prior_weight=1\n    )\n)\n\nfor x, y in dataset:\n    print(agg.transform_one(x))\n    agg = agg.learn_one(x, y)\n
    {'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 3.0}\n{'y_bayes_mean_by_place_and_country': 9.5}\n{'y_bayes_mean_by_place_and_country': 22.5}\n{'y_bayes_mean_by_place_and_country': 13.5}\n{'y_bayes_mean_by_place_and_country': 30.5}\n

    agg.state\n
    place        country\nTaco Bell    France     31.666667\nBurger King  Sweden     13.000000\n             France     12.333333\nTaco Bell    Sweden     47.000000\nName: y_bayes_mean_by_place_and_country, dtype: float64\n

    This transformer can also be used in conjunction with utils.TimeRolling. The latter requires a t argument, which is a timestamp that indicates when the current row was observed. For instance, we can calculate the average (how) of the target for each group (by) over the last 7 days (t):

    import datetime as dt\nimport random\nimport string\nfrom river import utils\n\nagg = feature_extraction.TargetAgg(\n    by=\"group\",\n    how=utils.TimeRolling(stats.Mean(), dt.timedelta(days=7))\n)\n\nfor day in range(366):\n    g = random.choice(string.ascii_lowercase)\n    x = {\"group\": g}\n    y = string.ascii_lowercase.index(g) + random.random()\n    t = dt.datetime(2023, 1, 1) + dt.timedelta(days=day)\n    agg = agg.learn_one(x, y, t=t)\n
    "},{"location":"api/feature-extraction/TargetAgg/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'
    • t \u2014 defaults to None

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Streaming groupbys in pandas for big datasets

    "},{"location":"api/feature-selection/PoissonInclusion/","title":"PoissonInclusion","text":"

    Randomly selects features with an inclusion trial.

    When a new feature is encountered, it is selected with probability p. The number of times a feature needs to be seen before it is added to the model follows a geometric distribution with expected value 1 / p. This feature selection method is meant to be used when you have a very large number of sparse features.
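
    As a minimal sketch of this inclusion trial (the class below is hypothetical, not the library's implementation), the selector only needs to remember which features have already won a trial:

    import random\n\nclass InclusionTrialSketch:\n    def __init__(self, p, seed=None):\n        self.p = p\n        self.rng = random.Random(seed)\n        self.included = set()\n\n    def transform_one(self, x):\n        # Run an inclusion trial for each feature that hasn't won one yet.\n        for i in x:\n            if i not in self.included and self.rng.random() < self.p:\n                self.included.add(i)\n        return {i: xi for i, xi in x.items() if i in self.included}\n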

    "},{"location":"api/feature-selection/PoissonInclusion/#parameters","title":"Parameters","text":"
    • p

      Type \u2192 float

      Probability of including a feature the first time it is encountered.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed value used for reproducibility.

    "},{"location":"api/feature-selection/PoissonInclusion/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import feature_selection\nfrom river import stream\n\nselector = feature_selection.PoissonInclusion(p=0.1, seed=42)\n\ndataset = iter(datasets.TrumpApproval())\n\nfeature_names = next(dataset)[0].keys()\nn = 0\n\nwhile True:\n    x, y = next(dataset)\n    xt = selector.transform_one(x)\n    if xt.keys() == feature_names:\n        break\n    n += 1\n\nn\n
    12\n

    "},{"location":"api/feature-selection/PoissonInclusion/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. McMahan, H.B., Holt, G., Sculley, D., Young, M., Ebner, D., Grady, J., Nie, L., Phillips, T., Davydov, E., Golovin, D. and Chikkerur, S., 2013, August. Ad click prediction: a view from the trenches. In Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 1222-1230). \u21a9

    "},{"location":"api/feature-selection/SelectKBest/","title":"SelectKBest","text":"

    Removes all but the \\(k\\) highest scoring features.

    "},{"location":"api/feature-selection/SelectKBest/#parameters","title":"Parameters","text":"
    • similarity

      Type \u2192 stats.base.Bivariate

    • k

      Default \u2192 10

      The number of features to keep.

    "},{"location":"api/feature-selection/SelectKBest/#attributes","title":"Attributes","text":"
    • similarities (dict)

      The similarity instances used for each feature.

    • leaderboard (dict)

      The actual similarity measures.
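
    To illustrate the selection step, here is a hedged sketch (select_k_best is a hypothetical helper, not the library's code) in which transforming amounts to keeping the k features with the highest similarity scores:

    import heapq\n\ndef select_k_best(x, leaderboard, k):\n    # Keep the k features whose similarity with the target is highest.\n    best = heapq.nlargest(k, leaderboard, key=leaderboard.get)\n    return {i: x[i] for i in best if i in x}\n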

    "},{"location":"api/feature-selection/SelectKBest/#examples","title":"Examples","text":"

    from pprint import pprint\nfrom river import feature_selection\nfrom river import stats\nfrom river import stream\nfrom sklearn import datasets\n\nX, y = datasets.make_regression(\n    n_samples=100,\n    n_features=10,\n    n_informative=2,\n    random_state=42\n)\n\nselector = feature_selection.SelectKBest(\n    similarity=stats.PearsonCorr(),\n    k=2\n)\n\nfor xi, yi, in stream.iter_array(X, y):\n    selector = selector.learn_one(xi, yi)\n\npprint(selector.leaderboard)\n
    Counter({9: 0.7898,\n        7: 0.5444,\n        8: 0.1062,\n        2: 0.0638,\n        4: 0.0538,\n        5: 0.0271,\n        1: -0.0312,\n        6: -0.0657,\n        3: -0.1501,\n        0: -0.1895})\n

    selector.transform_one(xi)\n
    {7: -1.2795, 9: -1.8408}\n

    "},{"location":"api/feature-selection/SelectKBest/#methods","title":"Methods","text":"learn_one

    Update with a set of features x and a target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.Target'

    Returns

    SupervisedTransformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/feature-selection/VarianceThreshold/","title":"VarianceThreshold","text":"

    Removes low-variance features.

    "},{"location":"api/feature-selection/VarianceThreshold/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 0

      Only features with a variance above the threshold will be kept.

    • min_samples

      Default \u2192 2

      The minimum number of samples required to perform selection.

    "},{"location":"api/feature-selection/VarianceThreshold/#attributes","title":"Attributes","text":"
    • variances (dict)

      The variance of each feature.
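
    The selection rule itself is simple; the following is an illustrative sketch (keeps_feature is a hypothetical helper, not the library's code):

    from river import stats\n\ndef keeps_feature(var: stats.Var, n_seen: int, threshold: float = 0, min_samples: int = 2) -> bool:\n    # A feature is kept once at least min_samples values have been seen\n    # and its running variance exceeds the threshold.\n    return n_seen >= min_samples and var.get() > threshold\n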

    "},{"location":"api/feature-selection/VarianceThreshold/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import stream\n\nX = [\n    [0, 2, 0, 3],\n    [0, 1, 4, 3],\n    [0, 1, 1, 3]\n]\n\nselector = feature_selection.VarianceThreshold()\n\nfor x, _ in stream.iter_array(X):\n    print(selector.learn_one(x).transform_one(x))\n
    {0: 0, 1: 2, 2: 0, 3: 3}\n{1: 1, 2: 4}\n{1: 1, 2: 1}\n

    "},{"location":"api/feature-selection/VarianceThreshold/#methods","title":"Methods","text":"check_feature learn_one

    Update with a set of features x.

    Many transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this method is to do nothing. Transformers that do need to update some state during learn_one can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/forest/AMFClassifier/","title":"AMFClassifier","text":"

    Aggregated Mondrian Forest classifier for online learning.

    This implementation is truly online1, in the sense that a single pass is performed, and that predictions can be produced anytime.

    Each node in a tree predicts according to the distribution of the labels it contains. This distribution is regularized using a \"Jeffreys\" prior with parameter dirichlet. For each class with count labels in the node and n_samples samples in it, the prediction of a node is given by

    \\(\\frac{count + dirichlet}{n_{samples} + dirichlet \\times n_{classes}}\\).
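
    For instance, with dirichlet = 0.5 and two classes, a node containing 3 samples of class a and 1 sample of class b predicts \\((3 + 0.5) / (4 + 0.5 \\times 2) = 0.7\\) for a and \\((1 + 0.5) / 5 = 0.3\\) for b.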

    The prediction for a sample is computed as the aggregated predictions of all the subtrees along the path leading to the leaf node containing the sample. The aggregation weights are exponential weights with learning rate step and log-loss when use_aggregation is True.

    This computation is performed exactly thanks to a context tree weighting algorithm. More details can be found in the paper cited in the references below.

    The final predictions are the average class probabilities predicted by each of the n_estimators trees in the forest.

    "},{"location":"api/forest/AMFClassifier/#parameters","title":"Parameters","text":"
    • n_estimators

      Type \u2192 int

      Default \u2192 10

      The number of trees in the forest.

    • step

      Type \u2192 float

      Default \u2192 1.0

      Step-size for the aggregation weights. Default is 1 for classification with the log-loss, which is usually the best choice.

    • use_aggregation

      Type \u2192 bool

      Default \u2192 True

      Controls if aggregation is used in the trees. It is highly recommended to leave it as True.

    • dirichlet

      Type \u2192 float

      Default \u2192 0.5

      Regularization level of the class frequencies used for predictions in each node. A rule of thumb is to set this to 1 / n_classes, where n_classes is the expected number of classes which might appear. Default is dirichlet = 0.5, which works well for binary classification problems.

    • split_pure

      Type \u2192 bool

      Default \u2192 False

      Controls whether nodes that contain only samples of the same class (\"pure\" nodes) should be split. Default is False, meaning pure nodes are not split, but True can sometimes be better.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/AMFClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/AMFClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\n\ndataset = datasets.Bananas().take(500)\n\nmodel = forest.AMFClassifier(\n    n_estimators=10,\n    use_aggregation=True,\n    dirichlet=0.5,\n    seed=1\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 85.37%\n

    "},{"location":"api/forest/AMFClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/forest/AMFClassifier/#notes","title":"Notes","text":"

    Only the log-loss is supported for the computation of the aggregation weights for now, namely the log-loss for multi-class classification.

    1. Mourtada, J., Ga\u00efffas, S., & Scornet, E. (2021). AMF: Aggregated Mondrian forests for online learning. Journal of the Royal Statistical Society Series B: Statistical Methodology, 83(3), 505-533.\u00a0\u21a9

    "},{"location":"api/forest/AMFRegressor/","title":"AMFRegressor","text":"

    Aggregated Mondrian Forest regressor for online learning.

    This algorithm is truly online, in the sense that a single pass is performed, and that predictions can be produced anytime.

    Each node in a tree predicts according to the average of the labels it contains. The prediction for a sample is computed as the aggregated predictions of all the subtrees along the path leading to the leaf node containing the sample. The aggregation weights are exponential weights with learning rate step using a squared loss when use_aggregation is True.

    This computation is performed exactly thanks to a context tree weighting algorithm. More details can be found in the original paper1.

    The final predictions are the average of the predictions of each of the n_estimators trees in the forest.

    "},{"location":"api/forest/AMFRegressor/#parameters","title":"Parameters","text":"
    • n_estimators

      Type \u2192 int

      Default \u2192 10

      The number of trees in the forest.

    • step

      Type \u2192 float

      Default \u2192 1.0

      Step-size for the aggregation weights.

    • use_aggregation

      Type \u2192 bool

      Default \u2192 True

      Controls if aggregation is used in the trees. It is highly recommended to leave it as True.

    • seed

      Type \u2192 int

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/AMFRegressor/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/AMFRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\n\ndataset = datasets.TrumpApproval()\nmodel = forest.AMFRegressor(seed=42)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.268533\n

    "},{"location":"api/forest/AMFRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Mourtada, J., Ga\u00efffas, S., & Scornet, E. (2021). AMF: Aggregated Mondrian forests for online learning. Journal of the Royal Statistical Society Series B: Statistical Methodology, 83(3), 505-533.\u00a0\u21a9

    "},{"location":"api/forest/ARFClassifier/","title":"ARFClassifier","text":"

    Adaptive Random Forest classifier.

    The 3 most important aspects of Adaptive Random Forest 1 are:

    1. inducing diversity through re-sampling

    2. inducing diversity through randomly selecting subsets of features for node splits

    3. drift detectors per base tree, which cause selective resets in response to drifts

    It also allows training background trees, which start training if a warning is detected and replace the active tree if the warning escalates to a drift.

    "},{"location":"api/forest/ARFClassifier/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of trees in the ensemble.

    • max_features

      Type \u2192 bool | str | int

      Default \u2192 sqrt

      Max number of attributes for each node split. - If int, then consider max_features at each split. - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split. - If \"sqrt\", then max_features=sqrt(n_features). - If \"log2\", then max_features=log2(n_features). - If None, then max_features=n_features.

    • lambda_value

      Type \u2192 int

      Default \u2192 6

      The lambda value for bagging (lambda=6 corresponds to Leveraging Bagging).

    • metric

      Type \u2192 metrics.base.MultiClassMetric | None

      Default \u2192 None

      Metric used to track trees' performance within the ensemble. Defaults to metrics.Accuracy().

    • disable_weighted_vote

      Default \u2192 False

      If True, disables the weighted vote prediction.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detection method. Set to None to disable drift detection. Defaults to drift.ADWIN(delta=0.001).

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detection method. Set to None to disable warning detection. Defaults to drift.ADWIN(delta=0.01).

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      [Tree parameter] The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

      [Tree parameter] Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

      [Tree parameter] Prediction mechanism used at leafs. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      [Tree parameter] Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      [Tree parameter] The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      [Tree parameter] Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/ARFClassifier/#attributes","title":"Attributes","text":"
    • models
    "},{"location":"api/forest/ARFClassifier/#examples","title":"Examples","text":"

    from river import evaluate\nfrom river import forest\nfrom river import metrics\nfrom river.datasets import synth\n\ndataset = synth.ConceptDriftStream(\n    seed=42,\n    position=500,\n    width=40\n).take(1000)\n\nmodel = forest.ARFClassifier(seed=8, leaf_prediction=\"mc\")\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 71.17%\n

    The total number of warnings and drifts detected, respectively

    model.n_warnings_detected(), model.n_drifts_detected()\n
    (2, 1)\n

    The number of warnings detected by tree number 2

    model.n_warnings_detected(2)\n
    1\n

    And the corresponding number of actual concept drifts detected

    model.n_drifts_detected(2)\n
    1\n

    "},{"location":"api/forest/ARFClassifier/#methods","title":"Methods","text":"learn_one n_drifts_detected

    Get the total number of concept drifts detected, or such number on an individual tree basis (optionally).

    If drift detection is disabled, will return None.

    Parameters

    • tree_id \u2014 'int | None' \u2014 defaults to None

    Returns

    int | None: The number of concept drifts detected.

    n_warnings_detected

    Get the total number of concept drift warnings detected, or the number on an individual tree basis (optionally).

    If warning detection is disabled, will return None.

    Parameters

    • tree_id \u2014 'int | None' \u2014 defaults to None

    Returns

    int | None: The number of concept drift warnings detected.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Heitor Murilo Gomes, Albert Bifet, Jesse Read, Jean Paul Barddal, Fabricio Enembreck, Bernhard Pfharinger, Geoff Holmes, Talel Abdessalem. Adaptive random forests for evolving data stream classification. In Machine Learning, DOI: 10.1007/s10994-017-5642-8, Springer, 2017.\u00a0\u21a9

    "},{"location":"api/forest/ARFRegressor/","title":"ARFRegressor","text":"

    Adaptive Random Forest regressor.

    The 3 most important aspects of Adaptive Random Forest 1 are:

    1. inducing diversity through re-sampling

    2. inducing diversity through randomly selecting subsets of features for node splits

    3. drift detectors per base tree, which cause selective resets in response to drifts

    Notice that this implementation is slightly different from the original algorithm proposed in 2. The HoeffdingTreeRegressor is used as base learner, instead of FIMT-DD. It also adds a new strategy to monitor the predictions and check for concept drifts. The deviations of the predictions from the target are monitored and normalized to the [0, 1] range to fulfill ADWIN's requirements. We assume that the data subjected to the normalization follows a normal distribution, and thus lies within the interval of the mean \\(\\pm3\\sigma\\).
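
    Under that assumption, the normalization can be sketched as follows (normalize_deviation is an illustrative helper, not the library's code):

    def normalize_deviation(dev: float, mean_dev: float, sd_dev: float) -> float:\n    # Map a deviation assumed to lie within mean +/- 3 standard deviations\n    # onto [0, 1], clipping values that fall outside the assumed interval.\n    if sd_dev == 0:\n        return 0.0\n    scaled = (dev - (mean_dev - 3 * sd_dev)) / (6 * sd_dev)\n    return min(1.0, max(0.0, scaled))\n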

    "},{"location":"api/forest/ARFRegressor/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      Number of trees in the ensemble.

    • max_features

      Default \u2192 sqrt

      Max number of attributes for each node split. - If int, then consider max_features at each split. - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split. - If \"sqrt\", then max_features=sqrt(n_features). - If \"log2\", then max_features=log2(n_features). - If None, then max_features=n_features.

    • aggregation_method

      Type \u2192 str

      Default \u2192 median

      The method to use to aggregate predictions in the ensemble. - 'mean' - 'median' - If 'median' is selected, the weighted vote is disabled.

    • lambda_value

      Type \u2192 int

      Default \u2192 6

      The lambda value for bagging (lambda=6 corresponds to Leveraging Bagging).

    • metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      Metric used to track trees' performance within the ensemble. Depending on the configuration, this metric is also used to weight predictions from the members of the ensemble. Defaults to metrics.MSE().

    • disable_weighted_vote

      Default \u2192 True

      If True, disables the weighted vote prediction, i.e. does not assign weights to individual trees' predictions and uses the arithmetic mean instead. Otherwise, the metric value is used to weight predictions.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Drift detection method. Set to None to disable drift detection. Defaults to drift.ADWIN(0.001).

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      Warning detection method. Set to None to disable warning detection. Defaults to drift.ADWIN(0.01).

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      [Tree parameter] The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

      [Tree parameter] Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      [Tree parameter] The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      [Tree parameter] The exponential decaying factor applied to the learning models' squared errors, that are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is going to be given to past observations. On the other hand, if its value approaches 0, the recent observed errors are going to have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      [Tree parameter] The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.EBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      [Tree parameter] The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/forest/ARFRegressor/#attributes","title":"Attributes","text":"
    • models

    • valid_aggregation_method

      Valid aggregation_method values.

    "},{"location":"api/forest/ARFRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import forest\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    forest.ARFRegressor(seed=42)\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.788619\n

    "},{"location":"api/forest/ARFRegressor/#methods","title":"Methods","text":"learn_one n_drifts_detected

    Get the total number of concept drifts detected, or such number on an individual tree basis (optionally).

    If drift detection is disabled, will return None.

    Parameters

    • tree_id \u2014 'int | None' \u2014 defaults to None

    Returns

    int | None: The number of concept drifts detected.

    n_warnings_detected

    Get the total number of concept drift warnings detected, or the number on an individual tree basis (optionally).

    If warning detection is disabled, will return None.

    Parameters

    • tree_id \u2014 'int | None' \u2014 defaults to None

    Returns

    int | None: The number of concept drift warnings detected.

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    1. Gomes, H.M., Bifet, A., Read, J., Barddal, J.P., Enembreck, F., Pfharinger, B., Holmes, G. and Abdessalem, T., 2017. Adaptive random forests for evolving data stream classification. Machine Learning, 106(9-10), pp.1469-1495.\u00a0\u21a9

    2. Gomes, H.M., Barddal, J.P., Boiko, L.E., Bifet, A., 2018. Adaptive random forests for data stream regression. ESANN 2018.\u00a0\u21a9

    "},{"location":"api/forest/OXTRegressor/","title":"OXTRegressor","text":"

    Online Extra Trees regressor.

    The online Extra Trees1 ensemble takes randomization some steps further when compared to Adaptive Random Forests (ARF). A subspace of the feature space is considered at each split attempt, as ARF does, and online bagging or subbagging can also (optionally) be used. Nonetheless, Extra Trees randomizes the split candidates evaluated by each leaf node (just a single split is tested per numerical feature, which brings significant speedups to the ensemble), and might also randomize the maximum depth of the forest members, as well as the size of the feature subspace processed by each of its trees' leaves.

    On the other hand, OXT suffers from a cold-start problem. As the splits are random, the predictive performance in small samples is usually worse than using a deterministic split approach, such as the one used by ARF.

    "},{"location":"api/forest/OXTRegressor/#parameters","title":"Parameters","text":"
    • n_models

      Type \u2192 int

      Default \u2192 10

      The number of trees in the ensemble.

    • max_features

      Type \u2192 bool | str | int

      Default \u2192 sqrt

      Max number of attributes for each node split. - If int, then consider max_features at each split. - If float, then max_features is a percentage and int(max_features * n_features) features are considered per split. - If \"sqrt\", then max_features=sqrt(n_features). - If \"log2\", then max_features=log2(n_features). - If \"random\", then max_features will assume a different random number in the interval [2, n_features] for each tree leaf. - If None, then max_features=n_features.

    • resampling_strategy

      Type \u2192 str | None

      Default \u2192 subbagging

      The chosen instance resampling strategy: - If None, no resampling will be done and the trees will process all instances. - If 'bagging', online bagging will be performed (sampling with replacement). - If 'subbagging', online subbagging will be performed (sampling without replacement).

    • resampling_rate

      Type \u2192 int | float

      Default \u2192 0.5

      Only valid if resampling_strategy is not None. Controls the parameters of the resampling strategy. - If resampling_strategy='bagging', must be an integer greater than or equal to 1 that parameterizes the Poisson distribution used to simulate bagging in online learning settings. It acts as the lambda parameter of Oza Bagging and Leveraging Bagging. - If resampling_strategy='subbagging', must be a float in the interval \\((0, 1]\\) that controls the chance of each instance being used by a tree for learning.

    • detection_mode

      Type \u2192 str

      Default \u2192 all

      The concept drift detection mode in which the forest operates. Valid values are: - \"all\": creates both warning and concept drift detectors. If a warning is detected, an alternate tree starts being trained in the background. If the warning trigger escalates to a concept drift, the affected tree is replaced by the alternate tree. - \"drop\": only the concept drift detectors are created. If a drift is detected, the affected tree is dropped and replaced by a new tree. - \"off\": disables the concept drift adaptation capabilities. The forest will act as if the processed stream is stationary.

    • warning_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The detector that will be used to trigger concept drift warnings. Defaults to drift.ADWIN(0.01).

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The detector used to detect concept drifts. Defaults to drift.ADWIN(0.001).

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth the ensemble members might reach. If None, the trees will grow indefinitely.

    • randomize_tree_depth

      Type \u2192 bool

      Default \u2192 False

      Whether or not to randomize the maximum depth of each tree in the ensemble. If max_depth is provided, it is going to act as an upper bound to generate the maximum depth for each tree.

    • track_metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      The performance metric used to weight predictions. Defaults to metrics.MAE().

    • disable_weighted_vote

      Type \u2192 bool

      Default \u2192 True

      Defines whether or not to use predictions weighted by each tree's prediction performance.

    • split_buffer_size

      Type \u2192 int

      Default \u2192 5

      Defines the size of the buffer used by the tree splitters when determining the feature range and a random split point in this interval.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed to support reproducibility.

    • grace_period

      Type \u2192 int

      Default \u2192 50

      [Tree parameter] Number of instances a leaf should observe between split attempts.

    • delta

      Type \u2192 float

      Default \u2192 0.01

      [Tree parameter] Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      [Tree parameter] Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

      [Tree parameter] Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      [Tree parameter] The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

      [Tree parameter] The exponential decaying factor applied to the learning models' squared errors, that are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is going to be given to past observations. On the other hand, if its value approaches 0, the recent observed errors are going to have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      [Tree parameter] List of Nominal attributes. If empty, then assume that all attributes are numerical.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      [Tree parameter] The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, only allow binary splits.

    • max_size

      Type \u2192 int

      Default \u2192 500

      [Tree parameter] Maximum memory (MB) consumed by the tree.

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 2000000

      [Tree parameter] Number of instances between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      [Tree parameter] If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      [Tree parameter] If True, enable merit-based tree pre-pruning.

    "},{"location":"api/forest/OXTRegressor/#attributes","title":"Attributes","text":"
    • instances_per_tree

      The number of instances processed by each one of the current forest members. Each time a concept drift is detected, the count corresponding to the affected tree is reset.

    • models

    • n_drifts

      The number of concept drifts detected per ensemble member.

    • n_tree_swaps

      The number of performed alternate tree swaps. Not applicable if the warning detectors are disabled.

    • n_warnings

      The number of warnings detected per ensemble member.

    • total_instances

      The total number of instances processed by the ensemble.

    "},{"location":"api/forest/OXTRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import forest\n\ndataset = datasets.synth.Friedman(seed=42).take(5000)\n\nmodel = forest.OXTRegressor(n_models=3, seed=42)\n\nmetric = metrics.RMSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    RMSE: 3.127311\n

    "},{"location":"api/forest/OXTRegressor/#methods","title":"Methods","text":"learn_one predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/forest/OXTRegressor/#notes","title":"Notes","text":"

    As the Online Extra Trees change the way in which Hoeffding Trees perform split attempts and monitor numerical input features, some of the parameters of the vanilla Hoeffding Tree algorithms are not available.

    1. Mastelini, S. M., Nakano, F. K., Vens, C., & de Leon Ferreira, A. C. P. (2022). Online Extra Trees Regressor. IEEE Transactions on Neural Networks and Learning Systems.\u00a0\u21a9

    "},{"location":"api/imblearn/ChebyshevOverSampler/","title":"ChebyshevOverSampler","text":"

    Over-sampling for imbalanced regression using Chebyshev's inequality.

    Chebyshev's inequality can be used to define the probability of target observations being frequent values (w.r.t. the distribution mean).

    Let \\(Y\\) be a random variable with finite expected value \\(\\overline{y}\\) and non-zero variance \\(\\sigma^2\\). For any real number \\(t > 0\\), the Chebyshev's inequality states that, for a wide class of unimodal probability distributions: \\(Pr(|y-\\overline{y}| \\ge t\\sigma) \\le \\dfrac{1}{t^2}\\).

    Taking \\(t=\\dfrac{|y-\\overline{y}|}{\\sigma}\\), and assuming \\(t > 1\\), Chebyshev\u2019s inequality for an observation \\(y\\) becomes: \\(P(|y - \\overline{y}|=t) = \\dfrac{\\sigma^2}{|y-\\overline{y}|^2}\\).

    Alternatively, one can use \\(t\\) directly to estimate a frequency weight \\(\\kappa = \\lceil t\\rceil\\) and define an over-sampling strategy for extreme and rare target values1. Each incoming instance is used \\(\\kappa\\) times to update the underlying regressor. Frequent target values contribute only once to the underlying regressor, whereas rare cases are used multiple times for training.
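
    As a hedged sketch of this weighting scheme (oversampling_weight is a hypothetical helper, not the library's code), the wrapped regressor's learn_one would then be called \\(\\kappa\\) times with the same pair:

    import math\nfrom river import stats\n\ndef oversampling_weight(y: float, y_mean: stats.Mean, y_var: stats.Var) -> int:\n    # kappa = ceil(t), with t = |y - mean| / sigma computed from running statistics.\n    sigma = y_var.get() ** 0.5\n    if sigma == 0:\n        return 1\n    t = abs(y - y_mean.get()) / sigma\n    return max(1, math.ceil(t))\n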

    "},{"location":"api/imblearn/ChebyshevOverSampler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regression model that will receive the biased sample.

    "},{"location":"api/imblearn/ChebyshevOverSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.ChebyshevOverSampler(\n        regressor=rules.AMRules(\n            n_min=50, delta=0.01\n        )\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 1.673902\n[1,000] MAE: 1.743046\n[1,001] MAE: 1.741335\nMAE: 1.741335\n

    "},{"location":"api/imblearn/ChebyshevOverSampler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    1. Aminian, Ehsan, Rita P. Ribeiro, and Jo\u00e3o Gama. \"Chebyshev approaches for imbalanced data streams regression models.\" Data Mining and Knowledge Discovery 35.6 (2021): 2389-2466.\u00a0\u21a9

    "},{"location":"api/imblearn/ChebyshevUnderSampler/","title":"ChebyshevUnderSampler","text":"

    Under-sampling for imbalanced regression using Chebyshev's inequality.

    Chebyshev's inequality can be used to define the probability of target observations being frequent values (w.r.t. the distribution mean).

    Let \\(Y\\) be a random variable with finite expected value \\(\\overline{y}\\) and non-zero variance \\(\\sigma^2\\). For any real number \\(t > 0\\), the Chebyshev's inequality states that, for a wide class of unimodal probability distributions: \\(Pr(|y-\\overline{y}| \\ge t\\sigma) \\le \\dfrac{1}{t^2}\\).

    Taking \\(t=\\dfrac{|y-\\overline{y}|}{\\sigma}\\), and assuming \\(t > 1\\), Chebyshev\u2019s inequality for an observation \\(y\\) becomes: \\(P(|y - \\overline{y}|=t) = \\dfrac{\\sigma^2}{|y-\\overline{y}|^2}\\). The reciprocal of this probability is used for under-sampling1 the most frequent cases. Extreme valued or rare cases have higher probabilities of selection, whereas the most frequent cases are likely to be discarded. Still, frequent cases have a small chance of being selected (controlled via the sp parameter) in case few rare instances were observed.
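
    One plausible reading of this selection rule, sketched below (keep_for_training is a hypothetical helper, not the library's code):

    import random\n\ndef keep_for_training(t: float, sp: float, rng: random.Random) -> bool:\n    # Rare cases (large t) are very likely to be kept, whereas frequent cases\n    # (t <= 1) are mostly discarded, except for the second chance probability sp.\n    if t > 1:\n        return rng.random() < 1 - 1 / (t * t)\n    return rng.random() < sp\n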

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      The regression model that will receive the biased sample.

    • sp

      Type \u2192 float

      Default \u2192 0.15

      Second chance probability. Even if an example is not initially selected for training, it still has a small chance of being selected in case the number of rare cases observed so far is small.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed to support reproducibility.

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.ChebyshevUnderSampler(\n        regressor=rules.AMRules(\n            n_min=50, delta=0.01,\n        ),\n        seed=42\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 1.787162\n[1,000] MAE: 1.515711\n[1,001] MAE: 1.515236\nMAE: 1.515236\n

    "},{"location":"api/imblearn/ChebyshevUnderSampler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    1. Aminian, Ehsan, Rita P. Ribeiro, and Jo\u00e3o Gama. \"Chebyshev approaches for imbalanced data streams regression models.\" Data Mining and Knowledge Discovery 35.6 (2021): 2389-2466.\u00a0\u21a9

    "},{"location":"api/imblearn/HardSamplingClassifier/","title":"HardSamplingClassifier","text":"

    Hard sampling classifier.

    This wrapper enables a model to retrain on past samples whose output was hard to predict. This works by storing the hardest samples in a buffer of a fixed size. When a new sample arrives, the wrapped model is either trained on one of the buffered samples with probability p or on the new sample with probability (1 - p).

    The hardness of an observation is evaluated with a loss function that compares the sample's ground truth with the wrapped model's prediction. If the buffer is not full, then the sample is added to the buffer. If the buffer is full and the new sample has a bigger loss than the lowest loss in the buffer, then it takes the place of the buffered sample with the lowest loss.
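
    A minimal sketch of one learning step under this scheme (hard_sampling_step and its list-based buffer are illustrative only, not the library's code):

    import random\n\ndef hard_sampling_step(model, buffer, x, y, p, size, loss, rng: random.Random):\n    # The buffer holds (loss, x, y) triples, kept sorted so the lowest loss comes first.\n    sample_loss = loss(y, model.predict_one(x))\n    if len(buffer) < size:\n        buffer.append((sample_loss, x, y))\n    elif sample_loss > buffer[0][0]:\n        buffer[0] = (sample_loss, x, y)\n    buffer.sort(key=lambda triple: triple[0])\n    if buffer and rng.random() < p:\n        _, bx, by = rng.choice(buffer)\n        model.learn_one(bx, by)\n    else:\n        model.learn_one(x, y)\n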

    "},{"location":"api/imblearn/HardSamplingClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • size

      Type \u2192 int

      Size of the buffer.

    • p

      Type \u2192 float

      Probability of updating the model with a sample from the buffer instead of a new incoming sample.

    • loss

      Type \u2192 optim.losses.BinaryLoss | optim.losses.MultiClassLoss | None

      Default \u2192 None

      Criterion used to evaluate the hardness of a sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed.

    "},{"location":"api/imblearn/HardSamplingClassifier/#attributes","title":"Attributes","text":"
    • classifier
    "},{"location":"api/imblearn/HardSamplingClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.HardSamplingClassifier(\n        classifier=linear_model.LogisticRegression(),\n        p=0.1,\n        size=40,\n        seed=42,\n    )\n)\n\nevaluate.progressive_val_score(\n    dataset=datasets.Phishing(),\n    model=model,\n    metric=metrics.ROCAUC(),\n    print_every=500,\n)\n
    [500] ROCAUC: 92.78%\n[1,000] ROCAUC: 94.76%\n[1,250] ROCAUC: 95.06%\nROCAUC: 95.06%\n

    "},{"location":"api/imblearn/HardSamplingClassifier/#methods","title":"Methods","text":"learn_one predict_one predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/HardSamplingRegressor/","title":"HardSamplingRegressor","text":"

    Hard sampling regressor.

    This wrapper enables a model to retrain on past samples whose output was hard to predict. This works by storing the hardest samples in a buffer of a fixed size. When a new sample arrives, the wrapped model is either trained on one of the buffered samples with probability p or on the new sample with probability (1 - p).

    The hardness of an observation is evaluated with a loss function that compares the sample's ground truth with the wrapped model's prediction. If the buffer is not full, then the sample is added to the buffer. If the buffer is full and the new sample has a bigger loss than the lowest loss in the buffer, then it takes the place of the buffered sample with the lowest loss.

    "},{"location":"api/imblearn/HardSamplingRegressor/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

    • size

      Type \u2192 int

      Size of the buffer.

    • p

      Type \u2192 float

      Probability of updating the model with a sample from the buffer instead of a new incoming sample.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      Criterion used to evaluate the hardness of a sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed.

    "},{"location":"api/imblearn/HardSamplingRegressor/#attributes","title":"Attributes","text":"
    • regressor
    "},{"location":"api/imblearn/HardSamplingRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.HardSamplingRegressor(\n        regressor=linear_model.LinearRegression(),\n        p=.2,\n        size=30,\n        seed=42,\n    )\n)\n\nevaluate.progressive_val_score(\n    datasets.TrumpApproval(),\n    model,\n    metrics.MAE(),\n    print_every=500\n)\n
    [500] MAE: 2.274021\n[1,000] MAE: 1.392399\n[1,001] MAE: 1.391246\nMAE: 1.391246\n

    "},{"location":"api/imblearn/HardSamplingRegressor/#methods","title":"Methods","text":"learn_one predict_one"},{"location":"api/imblearn/RandomOverSampler/","title":"RandomOverSampler","text":"

    Random over-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by over-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution. The implementation is a discrete version of reverse rejection sampling.

    See Working with imbalanced data for example usage.
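
    One plausible discrete scheme, sketched below (oversample_step and the inlined Poisson sampler are illustrative, not the library's code), is to repeat training on under-represented classes, with the repetition count drawn from a Poisson distribution:

    import math\nimport random\n\ndef poisson(rate: float, rng: random.Random) -> int:\n    # Knuth's algorithm for sampling from a Poisson distribution.\n    L, k, p = math.exp(-rate), 0, 1.0\n    while True:\n        p *= rng.random()\n        if p <= L:\n            return k\n        k += 1\n\ndef oversample_step(model, x, y, desired_dist, actual_counts, rng: random.Random):\n    # Classes seen less often than desired get a rate above 1, hence extra updates.\n    observed = actual_counts[y] / sum(actual_counts.values())\n    rate = desired_dist[y] / observed if observed > 0 else 1.0\n    for _ in range(poisson(rate, rng)):\n        model.learn_one(x, y)\n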

    "},{"location":"api/imblearn/RandomOverSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomOverSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomOverSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.0457...\n

    "},{"location":"api/imblearn/RandomOverSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/RandomSampler/","title":"RandomSampler","text":"

    Random sampling by mixing under-sampling and over-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by both under-sampling and over-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution.

    See Working with imbalanced data for example usage.

    "},{"location":"api/imblearn/RandomSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1. If set to None, then the observations will be sampled uniformly at random, which is strictly equivalent to using ensemble.BaggingClassifier.

    • sampling_rate

      Default \u2192 1.0

      The desired ratio of data to sample.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    sampling_rate=0.8,\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.09...\n

    "},{"location":"api/imblearn/RandomSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/imblearn/RandomUnderSampler/","title":"RandomUnderSampler","text":"

    Random under-sampling.

    This is a wrapper for classifiers. It will train the provided classifier by under-sampling the stream of given observations so that the class distribution seen by the classifier follows a given desired distribution. The implementation is a discrete version of rejection sampling.
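
    A hedged sketch of the rejection step: the class that is most under-represented relative to its target is always kept, while every other class is kept with a probability that shrinks as it becomes over-represented.

    import collections\nimport random\n\nrng = random.Random(42)\ndesired = {False: 0.4, True: 0.6}\ncounts = collections.Counter()\n\ndef keep(y):\n    counts[y] += 1\n    n = sum(counts.values())\n    actual = {c: counts[c] / n for c in counts}\n    # The class with the highest target-to-actual ratio is never rejected.\n    ceiling = max(desired[c] / actual[c] for c in actual)\n    return rng.random() < (desired[y] / actual[y]) / ceiling\n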

    See Working with imbalanced data for example usage.

    "},{"location":"api/imblearn/RandomUnderSampler/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

    • desired_dist

      Type \u2192 dict

      The desired class distribution. The keys are the classes whilst the values are the desired class percentages. The values must sum up to 1.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/imblearn/RandomUnderSampler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import imblearn\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\nmodel = imblearn.RandomUnderSampler(\n    (\n        preprocessing.StandardScaler() |\n        linear_model.LogisticRegression()\n    ),\n    desired_dist={False: 0.4, True: 0.6},\n    seed=42\n)\n\ndataset = datasets.CreditCard().take(3000)\n\nmetric = metrics.LogLoss()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    LogLoss: 0.0336...\n

    "},{"location":"api/imblearn/RandomUnderSampler/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • kwargs

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Under-sampling a dataset with desired ratios \u21a9

    2. Wikipedia article on rejection sampling \u21a9

    "},{"location":"api/linear-model/ALMAClassifier/","title":"ALMAClassifier","text":"

    Approximate Large Margin Algorithm (ALMA).

    "},{"location":"api/linear-model/ALMAClassifier/#parameters","title":"Parameters","text":"
    • p

      Default \u2192 2

    • alpha

      Default \u2192 0.9

    • B

      Default \u2192 1.1111111111111112

    • C

      Default \u2192 1.4142135623730951

    "},{"location":"api/linear-model/ALMAClassifier/#attributes","title":"Attributes","text":"
    • w (collections.defaultdict)

      The current weights.

    • k (int)

      The number of instances seen during training.

    "},{"location":"api/linear-model/ALMAClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.ALMAClassifier()\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 82.56%\n

    "},{"location":"api/linear-model/ALMAClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Gentile, Claudio. \"A new approximate maximal margin classification algorithm.\" Journal of Machine Learning Research 2.Dec (2001): 213-242 \u21a9

    "},{"location":"api/linear-model/BayesianLinearRegression/","title":"BayesianLinearRegression","text":"

    Bayesian linear regression.

    An advantage of Bayesian linear regression over standard linear regression is that features do not have to be scaled beforehand. Another attractive property is that this flavor of linear regression is somewhat insensitive to its hyperparameters. Finally, this model can output a predictive distribution rather than just a point estimate.

    The downside is that the learning step runs in O(n^2) time, where n is the number of features, whereas the learning step of standard linear regression takes O(n) time.
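
    Concretely, following the Bishop formulation cited below, the posterior over the weights is a Gaussian whose precision and mean are updated as each pair \((x_i, y_i)\) arrives, and the prediction for a new \(x\) is itself Gaussian:

    \[ S^{-1} = \alpha I + \beta \sum_i x_i x_i^\top, \qquad m = \beta S \sum_i y_i x_i, \qquad p(y \mid x) = \mathcal{N}\left(m^\top x, \; \beta^{-1} + x^\top S x\right) \]

    Maintaining S is what makes each learning step quadratic in the number of features.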

    "},{"location":"api/linear-model/BayesianLinearRegression/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1

      Prior parameter.

    • beta

      Default \u2192 1

      Noise parameter.

    • smoothing

      Type \u2192 float

      Default \u2192 None

      Smoothing allows the model to gradually \"forget\" the past, and focus on the more recent data. It thus enables the model to deal with concept drift. Due to the current implementation, activating smoothing may slow down the model.

    "},{"location":"api/linear-model/BayesianLinearRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.TrumpApproval()\nmodel = linear_model.BayesianLinearRegression()\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.586...\n

    x, _ = next(iter(dataset))\nmodel.predict_one(x)\n
    43.852...\n

    model.predict_one(x, with_dist=True)\n
    \ud835\udca9(\u03bc=43.85..., \u03c3=1.00...)\n

    The smoothing parameter can be set to make the model robust to drift. The parameter is expected to be between 0 and 1. To exemplify, let's generate some simulation data with an abrupt concept drift right in the middle.

    import itertools\nimport random\n\ndef random_data(coefs, n, seed=42):\n    rng = random.Random(seed)\n    for _ in range(n):\n        x = {i: rng.random() for i, c in enumerate(coefs)}\n        y = sum(c * xi for c, xi in zip(coefs, x.values()))\n        yield x, y\n

    Here's how the model performs without any smoothing:

    model = linear_model.BayesianLinearRegression()\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.284...\n

    And here's how it performs with some smoothing:

    model = linear_model.BayesianLinearRegression(smoothing=0.8)\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.159...\n

    Smoothing allows the model to gradually \"forget\" the past, and focus on the more recent data.

    Note how this works better than standard linear regression, even when using an aggressive learning rate.

    from river import optim\nmodel = linear_model.LinearRegression(optimizer=optim.SGD(0.5))\ndataset = itertools.chain(\n    random_data([0.1, 3], 100),\n    random_data([10, -2], 100)\n)\nmetric = metrics.MAE()\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.242...\n

    "},{"location":"api/linear-model/BayesianLinearRegression/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'
    • with_dist \u2014 defaults to False

    Returns

    base.typing.RegTarget: The prediction.

    1. Pattern Recognition and Machine Learning, page 52 \u2014 Christopher M. Bishop \u21a9

    2. Bayesian/Streaming Algorithms \u2014 Vincent Warmerdam \u21a9

    3. Bayesian linear regression for practitioners \u2014 Max Halford \u21a9

    "},{"location":"api/linear-model/LinearRegression/","title":"LinearRegression","text":"

    Linear regression.

    This estimator supports learning with mini-batches. On top of the single instance methods, it provides the following methods: learn_many and predict_many. Each method takes as input a pandas.DataFrame where each column represents a feature.

    It is generally a good idea to scale the data beforehand in order for the optimizer to converge. You can do this online with a preprocessing.StandardScaler.

    "},{"location":"api/linear-model/LinearRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights. Note that the intercept updates are handled separately.

    • loss

      Type \u2192 optim.losses.RegressionLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 optim.base.Scheduler | float

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.base.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/LinearRegression/#attributes","title":"Attributes","text":"
    • weights (dict)

      The current weights.

    "},{"location":"api/linear-model/LinearRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(intercept_lr=.1)\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.558735\n

    model['LinearRegression'].intercept\n
    35.617670\n

    You can call the debug_one method to break down a prediction. This works even if the linear regression is part of a pipeline.

    x, y = next(iter(dataset))\nreport = model.debug_one(x)\nprint(report)\n
    0. Input\n--------\ngallup: 43.84321 (float)\nipsos: 46.19925 (float)\nmorning_consult: 48.31875 (float)\nordinal_date: 736389 (int)\nrasmussen: 44.10469 (float)\nyou_gov: 43.63691 (float)\n<BLANKLINE>\n1. StandardScaler\n-----------------\ngallup: 1.18810 (float)\nipsos: 2.10348 (float)\nmorning_consult: 2.73545 (float)\nordinal_date: -1.73032 (float)\nrasmussen: 1.26872 (float)\nyou_gov: 1.48391 (float)\n<BLANKLINE>\n2. LinearRegression\n-------------------\nName              Value      Weight      Contribution\n      Intercept    1.00000    35.61767       35.61767\n          ipsos    2.10348     0.62689        1.31866\nmorning_consult    2.73545     0.24180        0.66144\n         gallup    1.18810     0.43568        0.51764\n      rasmussen    1.26872     0.28118        0.35674\n        you_gov    1.48391     0.03123        0.04634\n   ordinal_date   -1.73032     3.45162       -5.97242\n<BLANKLINE>\nPrediction: 32.54607\n

    "},{"location":"api/linear-model/LinearRegression/#methods","title":"Methods","text":"debug_one

    Debugs the output of the linear regression.

    Parameters

    • x \u2014 'dict'
    • decimals \u2014 'int' \u2014 defaults to 5

    Returns

    str: A table which explains the output.

    learn_many

    Update the model with a mini-batch of features X and real-valued targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchRegressor: self

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X

    Returns

    The predicted outcomes.

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/linear-model/LogisticRegression/","title":"LogisticRegression","text":"

    Logistic regression.

    This estimator supports learning with mini-batches. On top of the single instance methods, it provides the following methods: learn_many, predict_many, predict_proba_many. Each method takes as input a pandas.DataFrame where each column represents a feature.

    It is generally a good idea to scale the data beforehand in order for the optimizer to converge. You can do this online with a preprocessing.StandardScaler.
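
    A minimal sketch of the mini-batch API; the column names and values are made up for illustration.

    import pandas as pd\nfrom river import linear_model\n\nX = pd.DataFrame({'x1': [0.1, 0.4, 0.8], 'x2': [1.0, 0.2, 0.5]})\ny = pd.Series([True, False, True])\n\nmodel = linear_model.LogisticRegression()\nmodel = model.learn_many(X, y)\nproba = model.predict_proba_many(X)  # a DataFrame with a column per label\nlabels = model.predict_many(X)  # a Series of predicted labels\n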

    "},{"location":"api/linear-model/LogisticRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights. Note that the intercept is handled separately.

    • loss

      Type \u2192 optim.losses.BinaryLoss | None

      Default \u2192 None

      The loss function to optimize for. Defaults to optim.losses.Log.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Default \u2192 0.0

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Default \u2192 0.0

      Initial intercept value.

    • intercept_lr

      Type \u2192 float | optim.base.Scheduler

      Default \u2192 0.01

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.base.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/LogisticRegression/#attributes","title":"Attributes","text":"
    • weights

      The current weights.

    "},{"location":"api/linear-model/LogisticRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer=optim.SGD(.1))\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.96%\n

    "},{"location":"api/linear-model/LogisticRegression/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/linear-model/PAClassifier/","title":"PAClassifier","text":"

    Passive-aggressive learning for classification.

    "},{"location":"api/linear-model/PAClassifier/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 1.0

    • mode

      Default \u2192 1

    • learn_intercept

      Default \u2192 True

    "},{"location":"api/linear-model/PAClassifier/#examples","title":"Examples","text":"

    The following example is taken from this blog post.

    from river import linear_model\nfrom river import metrics\nfrom river import stream\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn import model_selection\n\nnp.random.seed(1000)\nX, y = datasets.make_classification(\n    n_samples=5000,\n    n_features=4,\n    n_informative=2,\n    n_redundant=0,\n    n_repeated=0,\n    n_classes=2,\n    n_clusters_per_class=2\n)\n\nX_train, X_test, y_train, y_test = model_selection.train_test_split(\n    X,\n    y,\n    test_size=0.35,\n    random_state=1000\n)\n\nmodel = linear_model.PAClassifier(\n    C=0.01,\n    mode=1\n)\n\nfor xi, yi in stream.iter_array(X_train, y_train):\n    y_pred = model.learn_one(xi, yi)\n\nmetric = metrics.Accuracy() + metrics.LogLoss()\n\nfor xi, yi in stream.iter_array(X_test, y_test):\n    metric = metric.update(yi, model.predict_proba_one(xi))\n\nprint(metric)\n
    Accuracy: 88.46%, LogLoss: 0.325727\n

    "},{"location":"api/linear-model/PAClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Crammer, K., Dekel, O., Keshet, J., Shalev-Shwartz, S. and Singer, Y., 2006. Online passive-aggressive algorithms. Journal of Machine Learning Research, 7(Mar), pp.551-585 \u21a9

    "},{"location":"api/linear-model/PARegressor/","title":"PARegressor","text":"

    Passive-aggressive learning for regression.

    "},{"location":"api/linear-model/PARegressor/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 1.0

    • mode

      Default \u2192 1

    • eps

      Default \u2192 0.1

    • learn_intercept

      Default \u2192 True

    "},{"location":"api/linear-model/PARegressor/#examples","title":"Examples","text":"

    The following example is taken from this blog post.

    from river import linear_model\nfrom river import metrics\nfrom river import stream\nimport numpy as np\nfrom sklearn import datasets\n\nnp.random.seed(1000)\nX, y = datasets.make_regression(n_samples=500, n_features=4)\n\nmodel = linear_model.PARegressor(\n    C=0.01,\n    mode=2,\n    eps=0.1,\n    learn_intercept=False\n)\nmetric = metrics.MAE() + metrics.MSE()\n\nfor xi, yi in stream.iter_array(X, y):\n    y_pred = model.predict_one(xi)\n    model = model.learn_one(xi, yi)\n    metric = metric.update(yi, y_pred)\n\nprint(metric)\n
    MAE: 9.809402, MSE: 472.393532\n

    "},{"location":"api/linear-model/PARegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Crammer, K., Dekel, O., Keshet, J., Shalev-Shwartz, S. and Singer, Y., 2006. Online passive-aggressive algorithms. Journal of Machine Learning Research, 7(Mar), pp.551-585. \u21a9

    "},{"location":"api/linear-model/Perceptron/","title":"Perceptron","text":"

    Perceptron classifier.

    In this implementation, the Perceptron is viewed as a special case of the logistic regression. The loss function that is used is the Hinge loss with a threshold set to 0, whilst the learning rate of the stochastic gradient descent procedure is set to 1 for both the weights and the intercept.
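
    In other words, the model is expected to behave like the following explicitly-configured logistic regression; this is a sketch of the equivalence rather than the internal code.

    from river import linear_model\nfrom river import optim\n\nperceptron_like = linear_model.LogisticRegression(\n    optimizer=optim.SGD(1),\n    loss=optim.losses.Hinge(threshold=0),\n    intercept_lr=1,\n)\n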

    "},{"location":"api/linear-model/Perceptron/#parameters","title":"Parameters","text":"
    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    "},{"location":"api/linear-model/Perceptron/#attributes","title":"Attributes","text":"
    • weights

      The current weights.

    "},{"location":"api/linear-model/Perceptron/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model as lm\nfrom river import metrics\nfrom river import preprocessing as pp\n\ndataset = datasets.Phishing()\n\nmodel = pp.StandardScaler() | lm.Perceptron()\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 85.84%\n

    "},{"location":"api/linear-model/Perceptron/#methods","title":"Methods","text":"learn_many

    Update the model with a mini-batch of features X and boolean targets y.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'
    • w \u2014 'float | pd.Series' \u2014 defaults to 1

    Returns

    MiniBatchClassifier: self

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Predict the outcome probabilities for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A dataframe with probabilities of True and False for each sample.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/linear-model/SoftmaxRegression/","title":"SoftmaxRegression","text":"

    Softmax regression is a generalization of logistic regression to multiple classes.

    Softmax regression is also known as \"multinomial logistic regression\". There is a set of weights for each class, hence the weights attribute is a nested collections.defaultdict. The main advantage of using this instead of a one-vs-all logistic regression is that the probabilities will be calibrated. Moreover, softmax regression is more robust to outliers.
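
    With one weight vector \(w_c\) per class \(c\), the predicted probability of class \(c\) given features \(x\) is:

    \[ p(y = c \mid x) = \frac{\exp(w_c^\top x)}{\sum_{c'} \exp(w_{c'}^\top x)} \]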

    "},{"location":"api/linear-model/SoftmaxRegression/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used to tune the weights.

    • loss

      Type \u2192 optim.losses.MultiClassLoss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0

      Amount of L2 regularization used to push weights towards 0.

    "},{"location":"api/linear-model/SoftmaxRegression/#attributes","title":"Attributes","text":"
    • weights (collections.defaultdict)
    "},{"location":"api/linear-model/SoftmaxRegression/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nmodel = preprocessing.StandardScaler()\nmodel |= linear_model.SoftmaxRegression()\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 81.88%\n

    "},{"location":"api/linear-model/SoftmaxRegression/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Course on classification stochastic gradient descent \u21a9

    2. Binary vs. Multi-Class Logistic Regression \u21a9

    "},{"location":"api/linear-model/base/GLM/","title":"GLM","text":"

    Generalized Linear Model.

    This serves as a base class for linear and logistic regression.

    "},{"location":"api/linear-model/base/GLM/#parameters","title":"Parameters","text":"
    • optimizer

      The sequential optimizer used for updating the weights. Note that the intercept updates are handled separately.

    • loss

      The loss function to optimize for.

    • l2

      Amount of L2 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • l1

      Amount of L1 regularization used to push weights towards 0. For now, only one type of penalty can be used. The joint use of L1 and L2 is not explicitly supported.

    • intercept_init

      Initial intercept value.

    • intercept_lr

      Learning rate scheduler used for updating the intercept. A optim.schedulers.Constant is used if a float is provided. The intercept is not updated when this is set to 0.

    • clip_gradient

      Clips the absolute value of each gradient value.

    • initializer

      Weights initialization scheme.

    "},{"location":"api/linear-model/base/GLM/#attributes","title":"Attributes","text":"
    • weights
    "},{"location":"api/linear-model/base/GLM/#methods","title":"Methods","text":"learn_many learn_one"},{"location":"api/metrics/Accuracy/","title":"Accuracy","text":"

    Accuracy score, which is the percentage of exact matches.

    "},{"location":"api/metrics/Accuracy/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Accuracy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Accuracy/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Accuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    Accuracy: 60.00%\n

    "},{"location":"api/metrics/Accuracy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/AdjustedMutualInfo/","title":"AdjustedMutualInfo","text":"

    Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information score that accounts for chance. It corrects the effect of agreement solely due to chance between clusterings, similar to the way the Adjusted Rand Index corrects the Rand Index. It is closely related to variation of information. The adjusted measure, however, is no longer a true metric.

    For two clusterings \\(U\\) and \\(V\\), the Adjusted Mutual Information is calculated as:

    \\[ AMI(U, V) = \\frac{MI(U, V) - E(MI(U, V))}{avg(H(U), H(V)) - E(MI(U, V))} \\]

    This metric is independent of the permutation of the class or cluster label values; furthermore, it is also symmetric. This can be useful to measure the agreement of two label assignments strategies on the same dataset, regardless of the ground truth.

    However, due to the complexity of the Expected Mutual Info Score, the computation of this metric is an order of magnitude slower than most other metrics, in general.

    "},{"location":"api/metrics/AdjustedMutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • average_method

      Default \u2192 arithmetic

      This parameter defines how to compute the normalizer in the denominator. Possible options include min, max, arithmetic and geometric.

    "},{"location":"api/metrics/AdjustedMutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/AdjustedMutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.AdjustedMutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.0\n0.105891\n0.298792\n

    metric\n
    AdjustedMutualInfo: 0.298792\n

    "},{"location":"api/metrics/AdjustedMutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/AdjustedRand/","title":"AdjustedRand","text":"

    Adjusted Rand Index.

    The Adjusted Rand Index is the corrected-for-chance version of the Rand Index 1 2. Such a correction for chance establishes a baseline by using the expected similarity of all pair-wise comparisons between clusterings specified by a random model.

    Traditionally, the Rand Index was corrected using the Permutation Model for Clustering. However, the premises of the permutation model are frequently violated; in many clustering scenarios, either the number of clusters or the size distribution of those clusters vary drastically. Variations of the adjusted Rand Index account for different models of random clusterings.

    Though the Rand Index may only yield a value between 0 and 1, the Adjusted Rand index can yield negative values if the index is less than the expected index.
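
    In its general corrected-for-chance form, the index rescales the Rand Index (RI) by its expected value under the random model:

    \[ ARI = \frac{RI - E[RI]}{\max(RI) - E[RI]} \]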

    "},{"location":"api/metrics/AdjustedRand/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/AdjustedRand/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/AdjustedRand/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.AdjustedRand()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.0\n0.09090909090909091\n0.24242424242424243\n

    metric\n
    AdjustedRand: 0.242424\n

    "},{"location":"api/metrics/AdjustedRand/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, January 13). Rand index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Rand_index&oldid=1000098911\u00a0\u21a9

    2. W. M. Rand (1971). \"Objective criteria for the evaluation of clustering methods\". Journal of the American Statistical Association. American Statistical Association. 66 (336): 846\u2013850. arXiv:1704.01036. doi:10.2307/2284239. JSTOR 2284239.\u00a0\u21a9

    "},{"location":"api/metrics/BalancedAccuracy/","title":"BalancedAccuracy","text":"

    Balanced accuracy.

    Balanced accuracy is the average of recall obtained on each class. It is used to deal with imbalanced datasets in binary and multi-class classification problems.

    "},{"location":"api/metrics/BalancedAccuracy/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/BalancedAccuracy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/BalancedAccuracy/#examples","title":"Examples","text":"

    from river import metrics\ny_true = [True, False, True, True, False, True]\ny_pred = [True, False, True, True, True, False]\n\nmetric = metrics.BalancedAccuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    BalancedAccuracy: 62.50%\n
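
    To verify by hand: recall on the True class is 3/4 and recall on the False class is 1/2, so the balanced accuracy is \((3/4 + 1/2) / 2 = 0.625\), i.e. 62.50%.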

    y_true = [0, 1, 0, 0, 1, 0]\ny_pred = [0, 1, 0, 0, 0, 1]\nmetric = metrics.BalancedAccuracy()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    BalancedAccuracy: 62.50%\n

    "},{"location":"api/metrics/BalancedAccuracy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/ClassificationReport/","title":"ClassificationReport","text":"

    A report for monitoring a classifier.

    This class maintains a set of metrics and updates each of them every time update is called. You can print this class at any time during a model's lifetime to get a tabular visualization of various metrics.

    You can wrap a metrics.ClassificationReport with utils.Rolling in order to obtain a classification report over a window of observations. You can also wrap it with utils.TimeRolling to obtain a report over a period of time.
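
    For instance, assuming utils.Rolling's (obj, window_size) signature, a report over the last 100 observations can be maintained like so:

    from river import metrics\nfrom river import utils\n\nrolling_report = utils.Rolling(metrics.ClassificationReport(), window_size=100)\n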

    "},{"location":"api/metrics/ClassificationReport/#parameters","title":"Parameters","text":"
    • decimals

      Default \u2192 2

      The number of decimals to display in each cell.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/ClassificationReport/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/ClassificationReport/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['pear', 'apple', 'banana', 'banana', 'banana']\ny_pred = ['apple', 'pear', 'banana', 'banana', 'apple']\n\nreport = metrics.ClassificationReport()\n\nfor yt, yp in zip(y_true, y_pred):\n    report = report.update(yt, yp)\n\nprint(report)\n
                   Precision   Recall   F1       Support\n<BLANKLINE>\n   apple       0.00%    0.00%    0.00%         1\n  banana     100.00%   66.67%   80.00%         3\n    pear       0.00%    0.00%    0.00%         1\n<BLANKLINE>\n   Macro      33.33%   22.22%   26.67%\n   Micro      40.00%   40.00%   40.00%\nWeighted      60.00%   40.00%   48.00%\n<BLANKLINE>\n                 40.00% accuracy\n

    "},{"location":"api/metrics/ClassificationReport/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/CohenKappa/","title":"CohenKappa","text":"

    Cohen's Kappa score.

    Cohen's Kappa expresses the level of agreement between two annotators on a classification problem. It is defined as

    \\[ \\kappa = (p_o - p_e) / (1 - p_e) \\]

    where \\(p_o\\) is the empirical probability of agreement on the label assigned to any sample (prequential accuracy), and \\(p_e\\) is the expected agreement when both annotators assign labels randomly.

    "},{"location":"api/metrics/CohenKappa/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/CohenKappa/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/CohenKappa/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']\n\nmetric = metrics.CohenKappa()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    CohenKappa: 42.86%\n
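
    To check by hand: the two label sequences agree on 4 of the 6 samples, so \(p_o = 4/6\). The empirical distributions are cat: 3/6, ant: 2/6, bird: 1/6 for y_true and ant: 3/6, cat: 3/6, bird: 0 for y_pred, giving \(p_e = 3/6 \times 3/6 + 2/6 \times 3/6 + 1/6 \times 0 = 5/12\), hence \(\kappa = (4/6 - 5/12) / (1 - 5/12) = 3/7\), i.e. approximately 42.86%.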

    "},{"location":"api/metrics/CohenKappa/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. J. Cohen (1960). \"A coefficient of agreement for nominal scales\". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104.\u00a0\u21a9

    "},{"location":"api/metrics/Completeness/","title":"Completeness","text":"

    Completeness Score.

    Completeness 1 is symmetrical to homogeneity. In order to satisfy the completeness criterion, a clustering must assign all of the data points that are members of a single class to a single cluster. To evaluate completeness, we examine the distribution of cluster assignments within each class. In a perfectly complete clustering solution, each of these distributions will be completely skewed to a single cluster.

    We can evaluate this degree of skew by calculating the conditional entropy of the proposed cluster distribution given the class of the component data points. However, in the worst case scenario, each class is represented by every cluster with a distribution equal to the distribution of cluster sizes. Therefore, symmetrically to the calculation above, we define completeness as:

    \\[ c = \\begin{cases} 1 if H(K) = 0, \\\\ 1 - \\frac{H(K|C)}{H(K)} otherwise. \\end{cases}. \\]"},{"location":"api/metrics/Completeness/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Completeness/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Completeness/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.Completeness()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n1.0\n0.3836885465963443\n0.5880325916843805\n0.6666666666666667\n

    metric\n
    Completeness: 66.67%\n

    "},{"location":"api/metrics/Completeness/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joing Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410 - 420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/ConfusionMatrix/","title":"ConfusionMatrix","text":"

    Confusion Matrix for binary and multi-class classification.

    "},{"location":"api/metrics/ConfusionMatrix/#parameters","title":"Parameters","text":"
    • classes

      Default \u2192 None

      The initial set of classes. This is optional and serves only for displaying purposes.

    "},{"location":"api/metrics/ConfusionMatrix/#attributes","title":"Attributes","text":"
    • classes

    • total_false_negatives

    • total_false_positives

    • total_true_negatives

    • total_true_positives

    "},{"location":"api/metrics/ConfusionMatrix/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat']\n\ncm = metrics.ConfusionMatrix()\n\nfor yt, yp in zip(y_true, y_pred):\n    cm = cm.update(yt, yp)\n\ncm\n
           ant  bird   cat\n ant     2     0     0\nbird     0     0     1\n cat     1     0     2\n

    cm['bird']['cat']\n
    1.0\n

    "},{"location":"api/metrics/ConfusionMatrix/#methods","title":"Methods","text":"false_negatives false_positives revert support true_negatives true_positives update"},{"location":"api/metrics/ConfusionMatrix/#notes","title":"Notes","text":"

    This confusion matrix is a 2D matrix of shape (n_classes, n_classes), corresponding to a single-target (binary and multi-class) classification task.

    Each row represents true (actual) class-labels, while each column corresponds to the predicted class-labels. For example, an entry in position [1, 2] means that the true class-label is 1, and the predicted class-label is 2 (incorrect prediction).

    This structure is used to keep updated statistics about a single-output classifier's performance and to compute multiple evaluation metrics.

    "},{"location":"api/metrics/CrossEntropy/","title":"CrossEntropy","text":"

    Multiclass generalization of the logarithmic loss.
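
    For true labels \(y_i\) and predicted probability distributions \(p_i\), the metric is the running mean of the per-sample loss:

    \[ CE = -\frac{1}{n} \sum_{i=1}^{n} \log p_i(y_i) \]

    This matches the example below: the first reported value is \(-\log(0.29450637) \approx 1.222454\).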

    "},{"location":"api/metrics/CrossEntropy/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/CrossEntropy/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2]\ny_pred = [\n    {0: 0.29450637, 1: 0.34216758, 2: 0.36332605},\n    {0: 0.21290077, 1: 0.32728332, 2: 0.45981591},\n    {0: 0.42860913, 1: 0.33380113, 2: 0.23758974},\n    {0: 0.44941979, 1: 0.32962558, 2: 0.22095463}\n]\n\nmetric = metrics.CrossEntropy()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n    print(metric.get())\n
    1.222454\n1.169691\n1.258864\n1.321597\n

    metric\n
    CrossEntropy: 1.321598\n

    "},{"location":"api/metrics/CrossEntropy/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/F1/","title":"F1","text":"

    Binary F1 score.
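
    The F1 score is the harmonic mean of precision and recall:

    \[ F_1 = \frac{2 \times PPV \times TPR}{PPV + TPR} \]

    In the example below there is 1 true positive, 1 false positive and 2 false negatives, so precision is 1/2, recall is 1/3 and \(F_1 = 2 \times (1/2) \times (1/3) / (1/2 + 1/3) = 2/5\), i.e. 40%.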

    "},{"location":"api/metrics/F1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/F1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/F1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, False, False, True, True, True]\ny_pred = [False, False, True, True, False, False]\n\nmetric = metrics.F1()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    F1: 40.00%\n

    "},{"location":"api/metrics/F1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/FBeta/","title":"FBeta","text":"

    Binary F-Beta score.

    The FBeta score is a weighted harmonic mean between precision and recall. The higher the beta value, the more weight recall carries. When beta equals 1, precision and recall are equivalently weighted, which results in the F1 score (see metrics.F1).
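
    In terms of precision (PPV) and recall (TPR), the score is:

    \[ F_\beta = (1 + \beta^2) \times \frac{PPV \times TPR}{\beta^2 \times PPV + TPR} \]

    With \(\beta = 2\) and the example below (precision 1/2, recall 1/3), this gives \(5 \times (1/6) / (2 + 1/3) = 5/14\), i.e. approximately 35.71%.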

    "},{"location":"api/metrics/FBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Weight of precision in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/FBeta/#attributes","title":"Attributes","text":"
    • precision (metrics.Precision)

    • recall (metrics.Recall)

    "},{"location":"api/metrics/FBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, False, False, True, True, True]\ny_pred = [False, False, True, True, False, False]\n\nmetric = metrics.FBeta(beta=2)\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    FBeta: 35.71%\n

    "},{"location":"api/metrics/FBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/FowlkesMallows/","title":"FowlkesMallows","text":"

    Fowlkes-Mallows Index.

    The Fowlkes-Mallows Index 1 2 is an external evaluation method used to determine the similarity between two clusterings; it can also be computed directly from a confusion matrix. The similarity may be measured between two hierarchical clusterings or between a clustering and a benchmark classification. A higher Fowlkes-Mallows index indicates a greater similarity between the clusters and the benchmark classifications.

    For two clusterings, the Fowlkes-Mallows Index is defined as:

    \\[ FM = \\sqrt{PPV \\times TPR} = \\sqrt{\\frac{TP}{TP+FP} \\times \\frac{TP}{TP+FN}} \\]

    where

    • TP, FP, FN are respectively the number of true positives, false positives and false negatives;

    • TPR is the True Positive Rate (or Sensitivity/Recall), PPV is the Positive Predictive Value (or Precision).

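    To make the definition concrete, here is a minimal sketch of the formula with illustrative counts (not taken from any particular dataset):

    import math

    # FM = sqrt(PPV * TPR), computed from illustrative counts.
    tp, fp, fn = 20, 5, 10
    ppv = tp / (tp + fp)  # precision
    tpr = tp / (tp + fn)  # recall
    print(math.sqrt(ppv * tpr))  # 0.7302...
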
    "},{"location":"api/metrics/FowlkesMallows/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/FowlkesMallows/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/FowlkesMallows/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.FowlkesMallows()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    FowlkesMallows: 0.00%\nFowlkesMallows: 100.00%\nFowlkesMallows: 57.74%\nFowlkesMallows: 40.82%\nFowlkesMallows: 35.36%\nFowlkesMallows: 47.14%\n

    "},{"location":"api/metrics/FowlkesMallows/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2020, December 22). Fowlkes\u2013Mallows index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Fowlkes%E2%80%93Mallows_index&oldid=995714222\u00a0\u21a9

    2. E. B. Fowlkes and C. L. Mallows (1983). \u201cA method for comparing two hierarchical clusterings\u201d. Journal of the American Statistical Association\u00a0\u21a9

    "},{"location":"api/metrics/GeometricMean/","title":"GeometricMean","text":"

    Geometric mean score.

    The geometric mean is a good indicator of a classifier's performance in the presence of class imbalance because it is independent of the distribution of examples between classes. This implementation computes the geometric mean of class-wise sensitivity (recall).

    \\[ gm = \\sqrt[n]{s_1\\cdot s_2\\cdot s_3\\cdot \\ldots\\cdot s_n} \\]

    where \\(s_i\\) is the sensitivity (recall) of class \\(i\\) and \\(n\\) is the number of classes.

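    As a sketch of the formula, the per-class recalls in the example further down are 2/3 for cat, 1 for ant, and 1/2 for bird, and their geometric mean reproduces the reported 69.34%:

    import math

    recalls = [2 / 3, 1.0, 1 / 2]  # per-class recalls (cat, ant, bird)
    gm = math.prod(recalls) ** (1 / len(recalls))
    print(f"{gm:.4f}")  # 0.6934, i.e. 69.34%
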
    "},{"location":"api/metrics/GeometricMean/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/GeometricMean/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/GeometricMean/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird', 'bird']\ny_pred = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat', 'bird']\n\nmetric = metrics.GeometricMean()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    GeometricMean: 69.34%\n

    "},{"location":"api/metrics/GeometricMean/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Barandela, R. et al. \u201cStrategies for learning in class imbalance problems\u201d, Pattern Recognition, 36(3), (2003), pp 849-851.\u00a0\u21a9

    "},{"location":"api/metrics/Homogeneity/","title":"Homogeneity","text":"

    Homogeneity Score.

    Homogeneity metric 1 of a cluster labeling given a ground truth.

    In order to satisfy the homogeneity criterion, a clustering must assign only those data points that are members of a single class to a single cluster. That is, the class distribution within each cluster should be skewed towards a single class, i.e. have zero entropy. We determine how close a given clustering is to this ideal by examining the conditional entropy of the class distribution given the proposed clustering.

    However, in an imperfect situation, the size of this value depends on the size of the dataset and the distribution of class sizes. Therefore, instead of taking the raw conditional entropy, we normalize it by the maximum reduction in entropy the clustering information could provide.

    As such, we define homogeneity as:

    \[ h = \begin{cases} 1 & \text{if } H(C) = 0, \\ 1 - \frac{H(C|K)}{H(C)} & \text{otherwise.} \end{cases} \]"},{"location":"api/metrics/Homogeneity/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Homogeneity/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/Homogeneity/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.Homogeneity()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.311278\n0.37515\n0.42062\n

    metric\n
    Homogeneity: 42.06%\n

    "},{"location":"api/metrics/Homogeneity/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410 - 420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/Jaccard/","title":"Jaccard","text":"

    Jaccard score.

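    For the binary case, the Jaccard score reduces to TP / (TP + FP + FN). The following sketch checks the final value of the example further down, where y_true = [False, True, True] and y_pred = [True, True, True] yield TP=2, FP=1, FN=0:

    tp, fp, fn = 2, 1, 0
    print(tp / (tp + fp + fn))  # 0.666..., i.e. 66.67%
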
    "},{"location":"api/metrics/Jaccard/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Jaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/Jaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [False, True, True]\ny_pred = [True, True, True]\n\nmetric = metrics.Jaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Jaccard: 0.00%\nJaccard: 50.00%\nJaccard: 66.67%\n

    "},{"location":"api/metrics/Jaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Jaccard index \u21a9

    "},{"location":"api/metrics/LogLoss/","title":"LogLoss","text":"

    Binary logarithmic loss.

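    Assuming the standard definition, the per-sample loss is \(-(y \ln p + (1 - y) \ln(1 - p))\) and the metric reports its running mean. This sketch reproduces the final value of the example further down:

    import math

    y_true = [True, False, False, True]
    y_pred = [0.9, 0.1, 0.2, 0.65]

    total = sum(
        -math.log(p) if t else -math.log(1 - p)
        for t, p in zip(y_true, y_pred)
    )
    print(total / len(y_true))  # 0.2161...
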
    "},{"location":"api/metrics/LogLoss/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/LogLoss/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, False, True]\ny_pred = [0.9,  0.1,   0.2,   0.65]\n\nmetric = metrics.LogLoss()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n    print(metric.get())\n
    0.105360\n0.105360\n0.144621\n0.216161\n

    metric\n
    LogLoss: 0.216162\n

    "},{"location":"api/metrics/LogLoss/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MAE/","title":"MAE","text":"

    Mean absolute error.

    "},{"location":"api/metrics/MAE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MAE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MAE()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.5\n0.5\n0.333\n0.5\n

    metric\n
    MAE: 0.5\n

    "},{"location":"api/metrics/MAE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MAPE/","title":"MAPE","text":"

    Mean absolute percentage error.

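    Assuming the standard definition, the metric is the running mean of \(|y - \hat{y}| / |y|\), reported as a percentage. This sketch reproduces the value of the example further down:

    y_true = [3, -0.5, 2, 7]
    y_pred = [2.5, 0.0, 2, 8]

    errors = [abs(yt - yp) / abs(yt) for yt, yp in zip(y_true, y_pred)]
    print(100 * sum(errors) / len(errors))  # 32.7380...
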
    "},{"location":"api/metrics/MAPE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MAPE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MAPE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MAPE: 32.738095\n

    "},{"location":"api/metrics/MAPE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MCC/","title":"MCC","text":"

    Matthews correlation coefficient.

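    The MCC can be written in terms of the four confusion counts. This sketch, assuming the standard formula, reproduces the -0.333333 of the example further down (TP=2, TN=0, FP=1, FN=1):

    import math

    tp, tn, fp, fn = 2, 0, 1, 1
    num = tp * tn - fp * fn
    den = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    print(num / den)  # -0.3333...
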
    "},{"location":"api/metrics/MCC/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/MCC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MCC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, True, True, False]\ny_pred = [True, False, True, True]\n\nmcc = metrics.MCC()\n\nfor yt, yp in zip(y_true, y_pred):\n    mcc = mcc.update(yt, yp)\n\nmcc\n
    MCC: -0.333333\n

    "},{"location":"api/metrics/MCC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia article \u21a9

    "},{"location":"api/metrics/MSE/","title":"MSE","text":"

    Mean squared error.

    "},{"location":"api/metrics/MSE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MSE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.MSE()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.25\n0.25\n0.1666\n0.375\n

    "},{"location":"api/metrics/MSE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/MacroF1/","title":"MacroF1","text":"

    Macro-average F1 score.

    This works by computing the F1 score per class, and then performing a global average.

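    As a sketch of the averaging step, the final 48.89% of the example further down is simply the unweighted mean of the per-class F1 scores (class 0: 2/3, class 1: 0, class 2: 0.8):

    f1_per_class = [2 / 3, 0.0, 0.8]
    print(sum(f1_per_class) / len(f1_per_class))  # 0.4888..., i.e. 48.89%
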
    "},{"location":"api/metrics/MacroF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MacroF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroF1()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroF1: 100.00%\nMacroF1: 33.33%\nMacroF1: 55.56%\nMacroF1: 55.56%\nMacroF1: 48.89%\n

    "},{"location":"api/metrics/MacroF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroFBeta/","title":"MacroFBeta","text":"

    Macro-average F-Beta score.

    This works by computing the F-Beta score per class, and then performing a global average.

    "},{"location":"api/metrics/MacroFBeta/#parameters","title":"Parameters","text":"
    • beta

      Weight of recall in the harmonic mean. A higher beta gives more importance to recall.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MacroFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroFBeta(beta=.8)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroFBeta: 100.00%\nMacroFBeta: 31.06%\nMacroFBeta: 54.04%\nMacroFBeta: 54.04%\nMacroFBeta: 48.60%\n

    "},{"location":"api/metrics/MacroFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroJaccard/","title":"MacroJaccard","text":"

    Macro-average Jaccard score.

    "},{"location":"api/metrics/MacroJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MacroJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroJaccard: 100.00%\nMacroJaccard: 25.00%\nMacroJaccard: 50.00%\nMacroJaccard: 50.00%\nMacroJaccard: 38.89%\n

    "},{"location":"api/metrics/MacroJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroPrecision/","title":"MacroPrecision","text":"

    Macro-average precision score.

    "},{"location":"api/metrics/MacroPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MacroPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroPrecision: 100.00%\nMacroPrecision: 25.00%\nMacroPrecision: 50.00%\nMacroPrecision: 50.00%\nMacroPrecision: 50.00%\n

    "},{"location":"api/metrics/MacroPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MacroRecall/","title":"MacroRecall","text":"

    Macro-average recall score.

    "},{"location":"api/metrics/MacroRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MacroRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MacroRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MacroRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MacroRecall: 100.00%\nMacroRecall: 50.00%\nMacroRecall: 66.67%\nMacroRecall: 66.67%\nMacroRecall: 55.56%\n

    "},{"location":"api/metrics/MacroRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MicroF1/","title":"MicroF1","text":"

    Micro-average F1 score.

    This computes the F1 score by merging all the predictions and true labels, and then computes a global F1 score.

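    In the single-label multiclass case, merging all predictions makes the micro-average F1 coincide with accuracy, since every misclassification counts once as a false positive and once as a false negative. This sketch checks the 60.00% of the example further down:

    y_true = [0, 1, 2, 2, 0]
    y_pred = [0, 1, 1, 2, 1]

    correct = sum(yt == yp for yt, yp in zip(y_true, y_pred))
    print(correct / len(y_true))  # 0.6, i.e. 60.00%
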
    "},{"location":"api/metrics/MicroF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MicroF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 0]\ny_pred = [0, 1, 1, 2, 1]\n\nmetric = metrics.MicroF1()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MicroF1: 60.00%\n

    "},{"location":"api/metrics/MicroF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MicroFBeta/","title":"MicroFBeta","text":"

    Micro-average F-Beta score.

    This computes the F-Beta score by merging all the predictions and true labels, and then computes a global F-Beta score.

    "},{"location":"api/metrics/MicroFBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Weight of recall in the harmonic mean. A higher beta gives more importance to recall.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MicroFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 0]\ny_pred = [0, 1, 1, 2, 1]\n\nmetric = metrics.MicroFBeta(beta=2)\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    MicroFBeta: 60.00%\n

    "},{"location":"api/metrics/MicroFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem?\u00a0\u21a9

    "},{"location":"api/metrics/MicroJaccard/","title":"MicroJaccard","text":"

    Micro-average Jaccard score.

    "},{"location":"api/metrics/MicroJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MicroJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroJaccard: 100.00%\nMicroJaccard: 33.33%\nMicroJaccard: 50.00%\nMicroJaccard: 60.00%\nMicroJaccard: 42.86%\n

    "},{"location":"api/metrics/MicroJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MicroPrecision/","title":"MicroPrecision","text":"

    Micro-average precision score.

    The micro-average precision score is exactly equivalent to the micro-average recall as well as the micro-average F1 score.

    "},{"location":"api/metrics/MicroPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MicroPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroPrecision: 100.00%\nMicroPrecision: 50.00%\nMicroPrecision: 66.67%\nMicroPrecision: 75.00%\nMicroPrecision: 60.00%\n

    "},{"location":"api/metrics/MicroPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MicroRecall/","title":"MicroRecall","text":"

    Micro-average recall score.

    The micro-average recall is exactly equivalent to the micro-average precision as well as the micro-average F1 score.

    "},{"location":"api/metrics/MicroRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MicroRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MicroRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MicroRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MicroRecall: 100.00%\nMicroRecall: 50.00%\nMicroRecall: 66.67%\nMicroRecall: 75.00%\nMicroRecall: 60.00%\n

    "},{"location":"api/metrics/MicroRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Why are precision, recall and F1 score equal when using micro averaging in a multi-class problem? \u21a9

    "},{"location":"api/metrics/MultiFBeta/","title":"MultiFBeta","text":"

    Multi-class F-Beta score with different betas per class.

    The multiclass F-Beta score is the arithmetic average of the binary F-Beta scores of each class. The mean can be weighted by providing class weights.

    "},{"location":"api/metrics/MultiFBeta/#parameters","title":"Parameters","text":"
    • betas

      Per-class weight of recall in the harmonic mean. A higher beta gives more importance to recall for that class.

    • weights

      Class weights. If not provided then uniform weights will be used.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MultiFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MultiFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.MultiFBeta(\n    betas={0: 0.25, 1: 1, 2: 4},\n    weights={0: 1, 1: 1, 2: 2}\n)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    MultiFBeta: 100.00%\nMultiFBeta: 25.76%\nMultiFBeta: 62.88%\nMultiFBeta: 62.88%\nMultiFBeta: 46.88%\n

    "},{"location":"api/metrics/MultiFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/MutualInfo/","title":"MutualInfo","text":"

    Mutual Information between two clusterings.

    The Mutual Information 1 is a measure of the similarity between two labels of the same data. Where \(|U_i|\) is the number of samples in cluster \(U_i\) and \(|V_j|\) is the number of samples in cluster \(V_j\), the Mutual Information between clusterings \(U\) and \(V\) can be calculated as:

    \[ MI(U,V) = \sum_{i=1}^{|U|} \sum_{j=1}^{|V|} \frac{|U_i \cap V_j|}{N} \log \frac{N |U_i \cap V_j|}{|U_i| |V_j|} \]

    This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score.

    This metric is furthermore symmetric: switching y_true and y_pred will return the same score value. This can be useful to measure the agreement of two independent label assignment strategies on the same dataset when the real ground truth is not known.

    The Mutual Information can be equivalently expressed as:

    \\[ MI(U,V) = H(U) - H(U | V) = H(V) - H(V | U) \\]

    where \\(H(U)\\) and \\(H(V)\\) are the marginal entropies, \\(H(U | V)\\) and \\(H(V | U)\\) are the conditional entropies.

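    As a sketch, the first formula can be evaluated (in nats) from the contingency counts of the example further down:

    import math
    from collections import Counter

    y_true = [1, 1, 2, 2, 3, 3]
    y_pred = [1, 1, 1, 2, 2, 2]
    n = len(y_true)

    joint = Counter(zip(y_true, y_pred))  # |U_i ∩ V_j|
    u = Counter(y_true)                   # |U_i|
    v = Counter(y_pred)                   # |V_j|

    mi = sum(
        (c / n) * math.log(n * c / (u[i] * v[j]))
        for (i, j), c in joint.items()
    )
    print(mi)  # 0.4620...
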
    "},{"location":"api/metrics/MutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/MutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/MutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.MutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.0\n0.0\n0.0\n0.215761\n0.395752\n0.462098\n

    metric\n
    MutualInfo: 0.462098\n

    "},{"location":"api/metrics/MutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/NormalizedMutualInfo/","title":"NormalizedMutualInfo","text":"

    Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalized version of the Mutual Information (MI) score that scales the result to the range between 0 (no mutual information) and 1 (perfect mutual information). In this formula, the mutual information is normalized by a generalized mean of the entropies of the true and predicted labels, as defined by the average_method parameter.

    Note that this measure is not adjusted for chance (i.e. it does not correct for agreement that arises solely due to chance); as a result, the Adjusted Mutual Information score will usually be preferred. However, this metric is still symmetric, which means that switching true and predicted labels will not alter the score value. This can be useful when the metric is used to measure the agreement between two independent label solutions on the same dataset, when the ground truth remains unknown.

    Another advantage of the metric is that, since it is based on entropy-related measures, it is independent of the permutation of class/cluster labels.

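    As a sketch of the normalization, assuming natural-log entropies and the default arithmetic mean: dividing the mutual information of the example further down (about 0.462098, as computed on the MutualInfo page) by the mean of the two label entropies reproduces the final score:

    import math
    from collections import Counter

    def entropy(labels):
        n = len(labels)
        return -sum((c / n) * math.log(c / n) for c in Counter(labels).values())

    y_true = [1, 1, 2, 2, 3, 3]
    y_pred = [1, 1, 1, 2, 2, 2]

    mi = 0.462098  # mutual information of the two labelings, in nats
    print(mi / ((entropy(y_true) + entropy(y_pred)) / 2))  # 0.5158...
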
    "},{"location":"api/metrics/NormalizedMutualInfo/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • average_method

      Default \u2192 arithmetic

      This parameter defines how to compute the normalizer in the denominator. Possible options include min, max, arithmetic and geometric.

    "},{"location":"api/metrics/NormalizedMutualInfo/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/NormalizedMutualInfo/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.NormalizedMutualInfo()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.343711\n0.458065\n0.515803\n

    metric\n
    NormalizedMutualInfo: 0.515804\n

    "},{"location":"api/metrics/NormalizedMutualInfo/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, March 17). Mutual information. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Mutual_information&oldid=1012714929\u00a0\u21a9

    "},{"location":"api/metrics/Precision/","title":"Precision","text":"

    Binary precision score.

    "},{"location":"api/metrics/Precision/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Precision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/Precision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Precision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Precision: 100.00%\nPrecision: 50.00%\nPrecision: 50.00%\nPrecision: 66.67%\nPrecision: 75.00%\n

    "},{"location":"api/metrics/Precision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/R2/","title":"R2","text":"

    Coefficient of determination (\(R^2\)) score.

    The coefficient of determination, denoted \\(R^2\\) or \\(r^2\\), is the proportion of the variance in the dependent variable that is predictable from the independent variable(s). 1

    The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of \(y\), disregarding the input features, would get an \(R^2\) score of 0.0.

    \\(R^2\\) is not defined when less than 2 samples have been observed. This implementation returns 0.0 in this case.

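    As a sketch, the batch definition \(R^2 = 1 - SS_{res} / SS_{tot}\) matches the final value of the example further down (the streaming implementation arrives at the same number incrementally once all four samples are seen):

    y_true = [3, -0.5, 2, 7]
    y_pred = [2.5, 0.0, 2, 8]

    mean = sum(y_true) / len(y_true)
    ss_res = sum((yt - yp) ** 2 for yt, yp in zip(y_true, y_pred))
    ss_tot = sum((yt - mean) ** 2 for yt in y_true)
    print(1 - ss_res / ss_tot)  # 0.9486...
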
    "},{"location":"api/metrics/R2/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/R2/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.R2()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.0\n0.9183\n0.9230\n0.9486\n

    "},{"location":"api/metrics/R2/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Coefficient of determination (Wikipedia) \u21a9

    "},{"location":"api/metrics/RMSE/","title":"RMSE","text":"

    Root mean squared error.

    "},{"location":"api/metrics/RMSE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/RMSE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.RMSE()\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    0.5\n0.5\n0.408248\n0.612372\n

    metric\n
    RMSE: 0.612372\n

    "},{"location":"api/metrics/RMSE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/RMSLE/","title":"RMSLE","text":"

    Root mean squared logarithmic error.

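    Assuming the standard definition with a log1p transform, \(\sqrt{\frac{1}{n} \sum_i (\ln(1 + y_i) - \ln(1 + \hat{y}_i))^2}\), this sketch reproduces the value of the example further down:

    import math

    y_true = [3, -0.5, 2, 7]
    y_pred = [2.5, 0.0, 2, 8]

    se = sum(
        (math.log1p(yt) - math.log1p(yp)) ** 2
        for yt, yp in zip(y_true, y_pred)
    )
    print(math.sqrt(se / len(y_true)))  # 0.3578...
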
    "},{"location":"api/metrics/RMSLE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/RMSLE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [3, -0.5, 2, 7]\ny_pred = [2.5, 0.0, 2, 8]\n\nmetric = metrics.RMSLE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    RMSLE: 0.357826\n

    "},{"location":"api/metrics/RMSLE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/ROCAUC/","title":"ROCAUC","text":"

    Receiver Operating Characteristic Area Under the Curve.

    This metric is an approximation of the true ROC AUC. Computing the true ROC AUC would require storing all the predictions and ground truths, which isn't desirable. The approximation error is not significant as long as the predicted probabilities are well calibrated. In any case, this metric can still be used to reliably compare models with one another.

    "},{"location":"api/metrics/ROCAUC/#parameters","title":"Parameters","text":"
    • n_thresholds

      Default \u2192 10

      The number of thresholds used for discretizing the ROC curve. A higher value will lead to more accurate results, but will also cost more time and memory.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/ROCAUC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicates whether the metric takes sample weights into account.

    "},{"location":"api/metrics/ROCAUC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [ 0,  0,   1,  1]\ny_pred = [.1, .4, .35, .8]\n\nmetric = metrics.ROCAUC()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ROCAUC: 87.50%\n

    The true ROC AUC is in fact 0.75. We can improve the accuracy by increasing the number of thresholds. This comes at the cost of more computation time and memory usage.

    metric = metrics.ROCAUC(n_thresholds=20)\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ROCAUC: 75.00%\n

    "},{"location":"api/metrics/ROCAUC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/Rand/","title":"Rand","text":"

    Rand Index.

    The Rand Index 1 2 is a measure of the similarity between two data clusterings. Given a set of elements S and two partitions of S to compare, X and Y, define the following:

    • a, the number of pairs of elements in S that are in the same subset in X and in the same subset in Y

    • b, the number of pairs of elements in S that are in different subsets in X and in different subsets in Y

    • c, the number of pairs of elements in S that are in the same subset in X and in different subsets in Y

    • d, the number of pairs of elements in S that are in different subsets in X and in the same subset in Y

    The Rand index, R, is

    \[ R = \frac{a+b}{a+b+c+d} = \frac{a+b}{\frac{n(n-1)}{2}}. \]"},{"location":"api/metrics/Rand/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/Rand/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Rand/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0, 0, 1, 1, 1]\ny_pred = [0, 0, 1, 1, 2, 2]\n\nmetric = metrics.Rand()\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    Rand: 0.666667\n
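
    To see where this number comes from, here is a brute-force check of the pairwise definition above. It is a sketch for illustration only, not how the metric is implemented internally.

    import itertools

    y_true = [0, 0, 0, 1, 1, 1]
    y_pred = [0, 0, 1, 1, 2, 2]

    pairs = list(itertools.combinations(range(len(y_true)), 2))
    # a-type pairs (same subset in both) and b-type pairs (different in both)
    agreements = sum(
        (y_true[i] == y_true[j]) == (y_pred[i] == y_pred[j])
        for i, j in pairs
    )
    print(agreements / len(pairs))  # 0.666... = (a + b) / (n * (n - 1) / 2)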

    "},{"location":"api/metrics/Rand/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Wikipedia contributors. (2021, January 13). Rand index. In Wikipedia, The Free Encyclopedia, from https://en.wikipedia.org/w/index.php?title=Rand_index&oldid=1000098911\u00a0\u21a9

    2. W. M. Rand (1971). \"Objective criteria for the evaluation of clustering methods\". Journal of the American Statistical Association. American Statistical Association. 66 (336): 846\u2013850. arXiv:1704.01036. doi:10.2307/2284239. JSTOR 2284239.\u00a0\u21a9

    "},{"location":"api/metrics/Recall/","title":"Recall","text":"

    Binary recall score.

    "},{"location":"api/metrics/Recall/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/Recall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/Recall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [True, False, True, True, True]\ny_pred = [True, True, False, True, True]\n\nmetric = metrics.Recall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    Recall: 100.00%\nRecall: 100.00%\nRecall: 50.00%\nRecall: 66.67%\nRecall: 75.00%\n

    "},{"location":"api/metrics/Recall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/RollingROCAUC/","title":"RollingROCAUC","text":"

    Rolling version of the Receiver Operating Characteristic Area Under the Curve.

    The RollingROCAUC calculates the metric using the instances in its window of size S. It keeps a queue of the instances: when an instance arrives and the queue already holds S instances, the oldest one is removed. The metric maintains a tree of ordered instances so that the AUC can be computed efficiently. It was implemented based on the algorithm presented in Brzezinski and Stefanowski, 2017.

    The difference between this metric and the standard ROCAUC is that the latter calculates an approximation of the real metric considering all data from the beginning of the stream, while the RollingROCAUC calculates the exact value considering only the last S instances. This approach may be beneficial if it's necessary to evaluate the model's performance over time, since calculating the metric using the entire stream may hide the current performance of the classifier.
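
    As a sketch of this difference, both metrics can be run side by side on the same stream; the windowed value tracks recent performance while the global value smooths over the whole history.

    from river import metrics

    y_true = [0, 1, 0, 1, 0, 1, 0, 0, 1, 1]
    y_pred = [.3, .5, .5, .7, .1, .3, .1, .4, .35, .8]

    rolling = metrics.RollingROCAUC(window_size=4)
    overall = metrics.ROCAUC()

    for yt, yp in zip(y_true, y_pred):
        rolling = rolling.update(yt, yp)
        overall = overall.update(yt, yp)

    print(rolling)  # exact AUC over the last 4 instances
    print(overall)  # approximate AUC over the full stream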

    "},{"location":"api/metrics/RollingROCAUC/#parameters","title":"Parameters","text":"
    • window_size

      Default \u2192 1000

      The max length of the window.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/RollingROCAUC/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/RollingROCAUC/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [ 0,  1,  0,  1,  0,  1,  0,  0,   1,  1]\ny_pred = [.3, .5, .5, .7, .1, .3, .1, .4, .35, .8]\n\nmetric = metrics.RollingROCAUC(window_size=4)\n\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    RollingROCAUC: 75.00%\n

    "},{"location":"api/metrics/RollingROCAUC/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/SMAPE/","title":"SMAPE","text":"

    Symmetric mean absolute percentage error.
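
    The docstring does not spell out the formula; the usual definition, which is consistent with the example below (pairs where both values are 0 contribute an error of 0), is:

    \[ \text{SMAPE} = \frac{100\%}{n} \sum_{t=1}^{n} \frac{2 \left| y_t - \hat{y}_t \right|}{\left| y_t \right| + \left| \hat{y}_t \right|}. \]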

    "},{"location":"api/metrics/SMAPE/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/SMAPE/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.07533, 0.0672, 0.0672]\ny_pred = [0, 0.102, 0.107, 0.047, 0.1, 0.032, 0.047, 0.108, 0.089]\n\nmetric = metrics.SMAPE()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    SMAPE: 37.869392\n

    "},{"location":"api/metrics/SMAPE/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/Silhouette/","title":"Silhouette","text":"

    Silhouette coefficient 1, roughly speaking, is the ratio between cohesion and the average distance from the points to their second-closest centroid. It rewards clusterings where points are very close to their assigned centroids and far from any other centroids, that is, clusterings with good cohesion and good separation. 2

    The definition of the Silhouette coefficient for online clustering evaluation differs from that of batch learning. It does not store all the points and compute pairwise distances between them, since doing so would be too expensive for an incremental metric.

    "},{"location":"api/metrics/Silhouette/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicates if a high value is better than a low one or not.

    "},{"location":"api/metrics/Silhouette/#examples","title":"Examples","text":"

    from river import cluster\nfrom river import stream\nfrom river import metrics\n\nX = [\n    [1, 2],\n    [1, 4],\n    [1, 0],\n    [4, 2],\n    [4, 4],\n    [4, 0],\n    [-2, 2],\n    [-2, 4],\n    [-2, 0]\n]\n\nk_means = cluster.KMeans(n_clusters=3, halflife=0.4, sigma=3, seed=0)\nmetric = metrics.Silhouette()\n\nfor x, _ in stream.iter_array(X):\n    k_means = k_means.learn_one(x)\n    y_pred = k_means.predict_one(x)\n    metric = metric.update(x, y_pred, k_means.centers)\n\nmetric\n
    Silhouette: 0.568058\n

    "},{"location":"api/metrics/Silhouette/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    revert

    Revert the metric.

    Parameters

    • x
    • y_pred
    • centers
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • x
    • y_pred
    • centers
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Rousseeuw, P. (1987). Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. Journal of Computational and Applied Mathematics 20, 53 - 65. DOI: 10.1016/0377-0427(87)90125-7\u00a0\u21a9

    2. Bifet, A. et al. (2018). \"Machine Learning for Data Streams\". DOI: 10.7551/mitpress/10654.001.0001.\u00a0\u21a9

    "},{"location":"api/metrics/VBeta/","title":"VBeta","text":"

    VBeta.

    VBeta (or V-Measure) 1 is an external entropy-based cluster evaluation measure. It provides an elegant solution to many problems that affect previously defined cluster evaluation measures, including:

    • Dependence on the clustering algorithm or the dataset,

    • The \"problem of matching\", where the clustering of only a portion of data points are evaluated, and

    • Accurate evaluation and combination of two desirable aspects of clustering, homogeneity and completeness.
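
    For reference, the formula below uses homogeneity h and completeness c, the conditional-entropy-based quantities defined in the paper 1, where C denotes the ground-truth classes and K the predicted clusters:

    \[ h = 1 - \frac{H(C|K)}{H(C)}, \qquad c = 1 - \frac{H(K|C)}{H(K)}. \]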

    Based upon the calculations of homogeneity and completeness, a clustering solution's V-measure is calculated by computing the weighted harmonic mean of homogeneity and completeness,

    \\[ V_{\\beta} = \\frac{(1 + \\beta) \\times h \\times c}{\\beta \\times h + c}. \\]"},{"location":"api/metrics/VBeta/#parameters","title":"Parameters","text":"
    • beta

      Type \u2192 float

      Default \u2192 1.0

      Weight of Homogeneity in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/VBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/VBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [1, 1, 2, 2, 3, 3]\ny_pred = [1, 1, 1, 2, 2, 2]\n\nmetric = metrics.VBeta(beta=1.0)\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp).get())\n
    1.0\n1.0\n0.0\n0.3437110184854507\n0.4580652856440158\n0.5158037429793888\n

    metric\n
    VBeta: 51.58%\n

    "},{"location":"api/metrics/VBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    1. Andrew Rosenberg and Julia Hirschberg (2007). V-Measure: A conditional entropy-based external cluster evaluation measure. Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pp. 410 - 420, Prague, June 2007.\u00a0\u21a9

    "},{"location":"api/metrics/WeightedF1/","title":"WeightedF1","text":"

    Weighted-average F1 score.

    This works by computing the F1 score per class, and then performing a global weighted average using the support of each class.

    "},{"location":"api/metrics/WeightedF1/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedF1/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedF1/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedF1()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedF1: 100.00%\nWeightedF1: 33.33%\nWeightedF1: 55.56%\nWeightedF1: 66.67%\nWeightedF1: 61.33%\n
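
    To unpack the final value: the class supports are 1, 1 and 3, and the per-class F1 scores are 2/3 for class 0, 0 for class 1 and 0.8 for class 2, so the weighted average is (1 × 2/3 + 1 × 0 + 3 × 0.8) / 5 ≈ 0.6133, i.e. 61.33%.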

    "},{"location":"api/metrics/WeightedF1/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedFBeta/","title":"WeightedFBeta","text":"

    Weighted-average F-Beta score.

    This works by computing the F-Beta score per class, and then performing a global weighted average according to the support of each class.

    "},{"location":"api/metrics/WeightedFBeta/#parameters","title":"Parameters","text":"
    • beta

      Weight of precision in the harmonic mean.

    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedFBeta/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedFBeta/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedFBeta(beta=0.8)\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedFBeta: 100.00%\nWeightedFBeta: 31.06%\nWeightedFBeta: 54.04%\nWeightedFBeta: 65.53%\nWeightedFBeta: 62.63%\n

    "},{"location":"api/metrics/WeightedFBeta/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedJaccard/","title":"WeightedJaccard","text":"

    Weighted average Jaccard score.

    "},{"location":"api/metrics/WeightedJaccard/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedJaccard/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedJaccard/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedJaccard()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedJaccard: 100.00%\nWeightedJaccard: 25.00%\nWeightedJaccard: 50.00%\nWeightedJaccard: 62.50%\nWeightedJaccard: 50.00%\n

    "},{"location":"api/metrics/WeightedJaccard/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedPrecision/","title":"WeightedPrecision","text":"

    Weighted-average precision score.

    This uses the support of each label to compute an average score, whereas metrics.MacroPrecision ignores the support.

    "},{"location":"api/metrics/WeightedPrecision/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedPrecision/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedPrecision/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedPrecision()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedPrecision: 100.00%\nWeightedPrecision: 25.00%\nWeightedPrecision: 50.00%\nWeightedPrecision: 62.50%\nWeightedPrecision: 70.00%\n

    "},{"location":"api/metrics/WeightedPrecision/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/WeightedRecall/","title":"WeightedRecall","text":"

    Weighted-average recall score.

    This uses the support of each label to compute an average score, whereas MacroRecall ignores the support.

    "},{"location":"api/metrics/WeightedRecall/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/WeightedRecall/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/WeightedRecall/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [0, 1, 2, 2, 2]\ny_pred = [0, 0, 2, 2, 1]\n\nmetric = metrics.WeightedRecall()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(metric.update(yt, yp))\n
    WeightedRecall: 100.00%\nWeightedRecall: 50.00%\nWeightedRecall: 66.67%\nWeightedRecall: 75.00%\nWeightedRecall: 60.00%\n

    "},{"location":"api/metrics/WeightedRecall/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/BinaryMetric/","title":"BinaryMetric","text":"

    Mother class for all binary classification metrics.
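
    As a minimal sketch of how subclasses typically work, here is a hypothetical specificity metric built on top of the shared confusion matrix. The true_negatives and false_positives helpers are assumptions about the confusion matrix API, not something guaranteed by this page.

    from river import metrics

    class Specificity(metrics.base.BinaryMetric):
        """Hypothetical metric computing TN / (TN + FP) for the positive value."""

        def get(self):
            # self.cm is the underlying confusion matrix; the two helper
            # methods below are assumed to exist on it.
            tn = self.cm.true_negatives(self.pos_val)
            fp = self.cm.false_positives(self.pos_val)
            try:
                return tn / (tn + fp)
            except ZeroDivisionError:
                return 0.0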

    "},{"location":"api/metrics/base/BinaryMetric/#parameters","title":"Parameters","text":"
    • cm

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    • pos_val

      Default \u2192 True

      Value to treat as \"positive\".

    "},{"location":"api/metrics/base/BinaryMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/BinaryMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'bool'
    • y_pred \u2014 'bool | float | dict[bool, float]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/ClassificationMetric/","title":"ClassificationMetric","text":"

    Mother class for all classification metrics.

    "},{"location":"api/metrics/base/ClassificationMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/base/ClassificationMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/ClassificationMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/Metric/","title":"Metric","text":"

    Mother class for all metrics.

    "},{"location":"api/metrics/base/Metric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/Metric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/Metrics/","title":"Metrics","text":"

    A container class for handling multiple metrics at once.
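
    As a usage sketch, assuming the container can be built from a list of metrics and updated like the metrics it wraps:

    from river import metrics

    tracker = metrics.base.Metrics([metrics.Accuracy(), metrics.MacroF1()])

    y_true = [0, 1, 2, 2, 2]
    y_pred = [0, 0, 2, 2, 1]

    for yt, yp in zip(y_true, y_pred):
        tracker.update(yt, yp)  # each wrapped metric is updated in turn

    print(tracker)  # each metric's value, separated by str_sep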

    "},{"location":"api/metrics/base/Metrics/#parameters","title":"Parameters","text":"
    • metrics

    • str_sep

      Default \u2192 ,

    "},{"location":"api/metrics/base/Metrics/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/Metrics/#methods","title":"Methods","text":"is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/MultiClassMetric/","title":"MultiClassMetric","text":"

    Mother class for all multi-class classification metrics.

    "},{"location":"api/metrics/base/MultiClassMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 confusion.ConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/base/MultiClassMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

      Indicates if labels are required, rather than probabilities.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/MultiClassMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/RegressionMetric/","title":"RegressionMetric","text":"

    Mother class for all regression metrics.

    "},{"location":"api/metrics/base/RegressionMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/RegressionMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'numbers.Number'
    • y_pred \u2014 'numbers.Number'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/base/WrapperMetric/","title":"WrapperMetric","text":""},{"location":"api/metrics/base/WrapperMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/base/WrapperMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/ExactMatch/","title":"ExactMatch","text":"

    Exact match score.

    This is the strictest multi-label metric, defined as the number of samples that have all their labels correctly classified, divided by the total number of samples.

    "},{"location":"api/metrics/multioutput/ExactMatch/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/ExactMatch/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False},\n    {0: True, 1: True, 2: False},\n]\n\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False},\n    {0: True, 1: True, 2: False},\n]\n\nmetric = metrics.multioutput.ExactMatch()\nfor yt, yp in zip(y_true, y_pred):\n    metric = metric.update(yt, yp)\n\nmetric\n
    ExactMatch: 33.33%\n

    "},{"location":"api/metrics/multioutput/ExactMatch/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model

    "},{"location":"api/metrics/multioutput/MacroAverage/","title":"MacroAverage","text":"

    Macro-average wrapper.

    A copy of the provided metric is made for each output. The arithmetic average of all the metrics is returned.
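
    A small usage sketch with a multi-output regression stream; the numbers are made up for illustration.

    from river import metrics

    y_true = [{0: 20.0, 1: 0.5}, {0: 25.0, 1: 0.6}]
    y_pred = [{0: 19.0, 1: 0.4}, {0: 24.0, 1: 0.7}]

    metric = metrics.multioutput.MacroAverage(metrics.MAE())

    for yt, yp in zip(y_true, y_pred):
        metric = metric.update(yt, yp)

    # Output 0 has an MAE of 1.0 and output 1 an MAE of 0.1, so the
    # macro average is their arithmetic mean, 0.55.
    print(metric.get())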

    "},{"location":"api/metrics/multioutput/MacroAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/MacroAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/MacroAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/MicroAverage/","title":"MicroAverage","text":"

    Micro-average wrapper.

    The provided metric is updated with the value of each output.
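
    A matching sketch for the micro variant, on the same made-up data as above. Every output value feeds one single underlying metric.

    from river import metrics

    y_true = [{0: 20.0, 1: 0.5}, {0: 25.0, 1: 0.6}]
    y_pred = [{0: 19.0, 1: 0.4}, {0: 24.0, 1: 0.7}]

    metric = metrics.multioutput.MicroAverage(metrics.MAE())

    for yt, yp in zip(y_true, y_pred):
        metric = metric.update(yt, yp)

    # One MAE over all four (output, sample) pairs: (1 + 0.1 + 1 + 0.1) / 4.
    # On this balanced toy data the micro and macro averages coincide at 0.55.
    print(metric.get())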

    "},{"location":"api/metrics/multioutput/MicroAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/MicroAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/MicroAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/","title":"MultiLabelConfusionMatrix","text":"

    Multi-label confusion matrix.

    Under the hood, this stores one metrics.ConfusionMatrix for each output.

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/#examples","title":"Examples","text":"

    from river import metrics\n\ncm = metrics.multioutput.MultiLabelConfusionMatrix()\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False}\n]\n\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False}\n]\n\nfor yt, yp in zip(y_true, y_pred):\n    cm = cm.update(yt, yp)\n\ncm\n
    0\n            False   True\n    False       0      1\n     True       0      1\n<BLANKLINE>\n1\n            False   True\n    False       0      0\n     True       1      1\n<BLANKLINE>\n2\n            False   True\n    False       1      0\n     True       0      1\n

    "},{"location":"api/metrics/multioutput/MultiLabelConfusionMatrix/#methods","title":"Methods","text":"revert update"},{"location":"api/metrics/multioutput/PerOutput/","title":"PerOutput","text":"

    Per-output wrapper.

    A copy of the metric is maintained for each output.

    "},{"location":"api/metrics/multioutput/PerOutput/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/PerOutput/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/PerOutput/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/SampleAverage/","title":"SampleAverage","text":"

    Sample-average wrapper.

    The provided metric is evaluated on each sample. The arithmetic average over all the samples is returned. This is equivalent to using average='samples' in scikit-learn.

    "},{"location":"api/metrics/multioutput/SampleAverage/#parameters","title":"Parameters","text":"
    • metric

      A classification or a regression metric.

    "},{"location":"api/metrics/multioutput/SampleAverage/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • metric

      Gives access to the wrapped metric.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/SampleAverage/#examples","title":"Examples","text":"

    from river import metrics\n\ny_true = [\n    {0: False, 1: True, 2: True},\n    {0: True, 1: True, 2: False}\n]\ny_pred = [\n    {0: True, 1: True, 2: True},\n    {0: True, 1: False, 2: False}\n]\n\nsample_jaccard = metrics.multioutput.SampleAverage(metrics.Jaccard())\n\nfor yt, yp in zip(y_true, y_pred):\n    sample_jaccard = sample_jaccard.update(yt, yp)\nsample_jaccard\n
    SampleAverage(Jaccard): 58.33%\n

    "},{"location":"api/metrics/multioutput/SampleAverage/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true
    • y_pred
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/","title":"MultiOutputClassificationMetric","text":"

    Mother class for all multi-output classification metrics.

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#parameters","title":"Parameters","text":"
    • cm

      Type \u2192 MultiLabelConfusionMatrix | None

      Default \u2192 None

      This parameter allows sharing the same confusion matrix between multiple metrics. Sharing a confusion matrix reduces the amount of storage and computation time.

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • requires_labels

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/base/MultiOutputClassificationMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, base.typing.ClfTarget]'
    • y_pred \u2014 'dict[str | int, base.typing.ClfTarget] | dict[str | int, dict[base.typing.ClfTarget, float]]'
    • sample_weight \u2014 defaults to 1.0

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/","title":"MultiOutputRegressionMetric","text":"

    Mother class for all multi-output regression metrics.

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/#attributes","title":"Attributes","text":"
    • bigger_is_better

      Indicate if a high value is better than a low one or not.

    • works_with_weights

      Indicate whether the model takes into consideration the effect of sample weights

    "},{"location":"api/metrics/multioutput/base/MultiOutputRegressionMetric/#methods","title":"Methods","text":"get

    Return the current value of the metric.

    is_better_than

    Indicate if the current metric is better than another one.

    Parameters

    • other

    revert

    Revert the metric.

    Parameters

    • y_true \u2014 'dict[str | int, float | int]'
    • y_pred \u2014 'dict[str | int, float | int]'

    update

    Update the metric.

    Parameters

    • y_true \u2014 'dict[str | int, float | int]'
    • y_pred \u2014 'dict[str | int, float | int]'

    works_with

    Indicates whether or not a metric can work with a given model.

    Parameters

    • model \u2014 'base.Estimator'

    "},{"location":"api/misc/SDFT/","title":"SDFT","text":"

    Sliding Discrete Fourier Transform (SDFT).

    Initially, the coefficients are all equal to 0. A call to numpy.fft.fft is triggered once window_size values have been seen. Subsequent values update the coefficients online. This is much faster than recomputing the FFT from scratch for every new value.
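
    The update rule behind this, following the sliding DFT of Jacobsen and Lyons 1, rotates each coefficient by one sample while swapping the value that leaves the window for the one that enters it (with N the window size):

    \[ X_k \leftarrow \left( X_k + x_{\text{new}} - x_{\text{old}} \right) e^{j 2 \pi k / N}. \]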

    "},{"location":"api/misc/SDFT/#parameters","title":"Parameters","text":"
    • window_size

      The size of the window.

    "},{"location":"api/misc/SDFT/#attributes","title":"Attributes","text":"
    • window_size
    "},{"location":"api/misc/SDFT/#examples","title":"Examples","text":"
    import numpy as np\nfrom river import misc\n\nX = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nwindow_size = 5\nsdft = misc.SDFT(window_size)\n\nfor i, x in enumerate(X):\n    sdft = sdft.update(x)\n\n    if i + 1 >= window_size:\n        assert np.allclose(sdft.coefficients, np.fft.fft(X[i+1 - window_size:i+1]))\n
    "},{"location":"api/misc/SDFT/#methods","title":"Methods","text":"update
    1. Jacobsen, E. and Lyons, R., 2003. The sliding DFT. IEEE Signal Processing Magazine, 20(2), pp.74-80. \u21a9

    2. Understanding and Implementing the Sliding DFT \u21a9

    "},{"location":"api/misc/Skyline/","title":"Skyline","text":"

    A skyline is a set of points that are not dominated by any other point.

    This implementation uses a block nested loop. Identical observations are all considered part of the skyline, where applicable.
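
    Dominance is the key notion here: a point p dominates a point q if p is at least as good on every objective and strictly better on at least one. A minimal sketch of such a check (a hypothetical helper, not the class's internal method):

    def dominates(p: dict, q: dict, minimize: list, maximize: list) -> bool:
        """Return True if p dominates q over the given objectives."""
        no_worse = (
            all(p[f] <= q[f] for f in minimize)
            and all(p[f] >= q[f] for f in maximize)
        )
        strictly_better = (
            any(p[f] < q[f] for f in minimize)
            or any(p[f] > q[f] for f in maximize)
        )
        return no_worse and strictly_better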

    "},{"location":"api/misc/Skyline/#parameters","title":"Parameters","text":"
    • minimize

      Type \u2192 list | None

      Default \u2192 None

      A list of features for which the values need to be minimized. Can be omitted as long as maximize is specified.

    • maximize

      Type \u2192 list | None

      Default \u2192 None

      A list of features for which the values need to be maximized. Can be omitted as long as minimize is specified.

    "},{"location":"api/misc/Skyline/#examples","title":"Examples","text":"

    Here is an example taken from this blog post.

    import random\nfrom river import misc\n\ncity_prices = {\n    'Bordeaux': 4045,\n    'Lyon': 4547,\n    'Toulouse': 3278\n}\n\ndef random_house():\n    city = random.choice(['Bordeaux', 'Lyon', 'Toulouse'])\n    size = round(random.gauss(200, 50))\n    price = round(random.uniform(0.8, 1.2) * city_prices[city] * size)\n    return {'city': city, 'size': size, 'price': price}\n\nskyline = misc.Skyline(minimize=['price'], maximize=['size'])\n\nrandom.seed(42)\n\nfor _ in range(100):\n    house = random_house()\n    skyline = skyline.update(house)\n\nprint(len(skyline))\n
    13\n

    print(skyline[0])\n
    {'city': 'Toulouse', 'size': 280, 'price': 763202}\n

    Here is another example using the kart data from Mario Kart: Double Dash!!.

    import collections\nfrom river import misc\n\nKart = collections.namedtuple(\n     'Kart',\n     'name speed off_road acceleration weight turbo'\n)\n\nkarts = [\n    Kart('Red Fire', 5, 4, 4, 5, 2),\n    Kart('Green Fire', 7, 3, 3, 4, 2),\n    Kart('Heart Coach', 4, 6, 6, 5, 2),\n    Kart('Bloom Coach', 6, 4, 5, 3, 2),\n    Kart('Turbo Yoshi', 4, 5, 6, 6, 2),\n    Kart('Turbo Birdo', 6, 4, 4, 7, 2),\n    Kart('Goo-Goo Buggy', 1, 9, 9, 2, 3),\n    Kart('Rattle Buggy', 2, 9, 8, 2, 3),\n    Kart('Toad Kart', 3, 9, 7, 2, 3),\n    Kart('Toadette Kart', 1, 9, 9, 2, 3),\n    Kart('Koopa Dasher', 2, 8, 8, 3, 3),\n    Kart('Para-Wing', 1, 8, 9, 3, 3),\n    Kart('DK Jumbo', 8, 2, 2, 8, 1),\n    Kart('Barrel Train', 8, 7, 3, 5, 3),\n    Kart('Koopa King', 9, 1, 1, 9, 1),\n    Kart('Bullet Blaster', 8, 1, 4, 1, 3),\n    Kart('Wario Car', 7, 3, 3, 7, 1),\n    Kart('Waluigi Racer', 5, 9, 5, 6, 2),\n    Kart('Piranha Pipes', 8, 7, 2, 9, 1),\n    Kart('Boo Pipes', 2, 9, 8, 9, 1),\n    Kart('Parade Kart', 7, 3, 4, 7, 3)\n]\n\nskyline = misc.Skyline(\n    maximize=['speed', 'off_road', 'acceleration', 'turbo'],\n    minimize=['weight']\n)\n\nfor kart in karts:\n    skyline = skyline.update(kart._asdict())\n\nbest_cart_names = [kart['name'] for kart in skyline]\nfor name in best_cart_names:\n    print(f'- {name}')\n
    - Green Fire\n- Heart Coach\n- Bloom Coach\n- Goo-Goo Buggy\n- Rattle Buggy\n- Toad Kart\n- Toadette Kart\n- Barrel Train\n- Koopa King\n- Bullet Blaster\n- Waluigi Racer\n- Parade Kart\n

    for name in sorted(set(kart.name for kart in karts) - set(best_cart_names)):\n    print(f'- {name}')\n
    - Boo Pipes\n- DK Jumbo\n- Koopa Dasher\n- Para-Wing\n- Piranha Pipes\n- Red Fire\n- Turbo Birdo\n- Turbo Yoshi\n- Wario Car\n

    "},{"location":"api/misc/Skyline/#methods","title":"Methods","text":"
    1. Skyline queries in Python \u21a9

    2. Borzsony, S., Kossmann, D. and Stocker, K., 2001, April. The skyline operator. In Proceedings 17th international conference on data engineering (pp. 421-430). IEEE. \u21a9

    3. Tao, Y. and Papadias, D., 2006. Maintaining sliding window skylines on data streams. IEEE Transactions on Knowledge and Data Engineering, 18(3), pp.377-391. \u21a9

    "},{"location":"api/model-selection/BanditClassifier/","title":"BanditClassifier","text":"

    Bandit-based model selection for classification.

    Each model is associated with an arm. At each learn_one call, the policy decides which arm/model to pull. The reward is the performance of the model on the provided sample. The predict_one and predict_proba_one methods use the current best model.

    "},{"location":"api/model-selection/BanditClassifier/#parameters","title":"Parameters","text":"
    • models

      The models to select from.

    • metric

      Type \u2192 metrics.base.ClassificationMetric

      The metric that is used to measure the performance of each model.

    • policy

      Type \u2192 bandit.base.Policy

      The bandit policy to use.

    "},{"location":"api/model-selection/BanditClassifier/#attributes","title":"Attributes","text":"
    • best_model

    • models

    "},{"location":"api/model-selection/BanditClassifier/#examples","title":"Examples","text":"

    from river import bandit\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LogisticRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\ndataset = datasets.Phishing()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.BanditClassifier(\n        models,\n        metric=metrics.Accuracy(),\n        policy=bandit.EpsilonGreedy(\n            epsilon=0.1,\n            decay=0.001,\n            burn_in=20,\n            seed=42\n        )\n    )\n)\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 88.96%\n

    "},{"location":"api/model-selection/BanditClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/model-selection/BanditRegressor/","title":"BanditRegressor","text":"

    Bandit-based model selection for regression.

    Each model is associated with an arm. At each learn_one call, the policy decides which arm/model to pull. The reward is the performance of the model on the provided sample. The predict_one method uses the current best model.

    "},{"location":"api/model-selection/BanditRegressor/#parameters","title":"Parameters","text":"
    • models

      The models to select from.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      The metric that is used to measure the performance of each model.

    • policy

      Type \u2192 bandit.base.Policy

      The bandit policy to use.

    "},{"location":"api/model-selection/BanditRegressor/#attributes","title":"Attributes","text":"
    • best_model

    • models

    "},{"location":"api/model-selection/BanditRegressor/#examples","title":"Examples","text":"

    from river import bandit\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.BanditRegressor(\n        models,\n        metric=metrics.MAE(),\n        policy=bandit.EpsilonGreedy(\n            epsilon=0.1,\n            decay=0.001,\n            burn_in=100,\n            seed=42\n        )\n    )\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 3.134089\n

    Here's another example using the UCB policy. The latter is more sensitive to the target scale, and usually works better when the target is rescaled.

    models = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [0.0001, 0.001, 1e-05, 0.01]\n]\n\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetStandardScaler(\n        model_selection.BanditRegressor(\n            models,\n            metric=metrics.MAE(),\n            policy=bandit.UCB(\n                delta=1,\n                burn_in=100\n            )\n        )\n    )\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.875333\n

    "},{"location":"api/model-selection/BanditRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/model-selection/GreedyRegressor/","title":"GreedyRegressor","text":"

    Greedy selection regressor.

    This selection method simply updates each model at each time step. The current best model is used to make predictions. It's greedy in the sense that updating every model at every step can be costly. Bandit-like algorithms, on the other hand, are more temperate, in that they only update a subset of the models at each step.

    "},{"location":"api/model-selection/GreedyRegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 list[base.Regressor]

      The models to select from.

    • metric

      Type \u2192 metrics.base.RegressionMetric | None

      Default \u2192 None

      The metric that is used to measure the performance of each model.

    "},{"location":"api/model-selection/GreedyRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/GreedyRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import model_selection\nfrom river import optim\nfrom river import preprocessing\n\nmodels = [\n    linear_model.LinearRegression(optimizer=optim.SGD(lr=lr))\n    for lr in [1e-5, 1e-4, 1e-3, 1e-2]\n]\n\ndataset = datasets.TrumpApproval()\nmetric = metrics.MAE()\nmodel = (\n    preprocessing.StandardScaler() |\n    model_selection.GreedyRegressor(models, metric)\n)\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.319678\n

    "},{"location":"api/model-selection/GreedyRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/","title":"SuccessiveHalvingClassifier","text":"

    Successive halving algorithm for classification.

    Successive halving is a method for performing model selection without having to train each model on the whole dataset. At certain points in time (called \"rungs\"), the worst performing models are discarded and the best ones keep competing with each other. The rung values are designed so that at most budget model updates will be performed in total.

    If you have k combinations of hyperparameters and your dataset contains n observations, then the maximal budget you can allocate is:

    \[\frac{2kn}{\eta}\]

    It is recommended that you check this beforehand. This bound can't be checked by the function because the size of the dataset is not known in advance. In fact, the stream is potentially infinite, in which case the algorithm will simply terminate once all the budget has been spent.
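
    As a concrete (hypothetical) check of this bound: with k = 10 models, n = 1000 observations and eta = 2, the bound above gives at most \(2 \times 10 \times 1000 / 2 = 10000\) model updates. Conversely, plugging B = 10000 into the formula below yields \(\lceil \lfloor 10000 / 2000 \rfloor \times 2 \rceil = 10\) combinations that go through all the data, which is consistent with the 10 models we started with.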

    If you have a budget of B and your dataset contains n observations, then the number of hyperparameter combinations that will spend all the budget and go through all the data is:

    \\[\\left\\lceil\\left\\lfloor\\frac{B}{2n}\\right\\rfloor \\times eta \\right\\rceil\\]"},{"location":"api/model-selection/SuccessiveHalvingClassifier/#parameters","title":"Parameters","text":"
    • models

      The models to compare.

    • metric

      Type \u2192 metrics.base.Metric

      Metric used for comparing models with.

    • budget

      Type \u2192 int

      Total number of model updates you wish to allocate.

    • eta

      Default \u2192 2

    Rate of elimination. At every rung, math.ceil(k / eta) models are kept, where k is the number of models that have reached the rung. A higher eta value will focus on fewer models but will allocate more iterations to the best models.

    • verbose

      Default \u2192 False

      Whether to display progress or not.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#examples","title":"Examples","text":"

    As an example, let's use successive halving to tune the optimizer of a logistic regression. We'll first define the model.

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n

    Let's now define a grid of parameters which we would like to compare. We'll try different optimizers with various learning rates.

    from river import utils\nfrom river import optim\n\nmodels = utils.expand_param_grid(model, {\n    'LogisticRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01, .005]}),\n            (optim.Adam, {'beta_1': [.01, .001], 'lr': [.1, .01, .001]}),\n            (optim.Adam, {'beta_1': [.1], 'lr': [.001]}),\n        ]\n    }\n})\n

    We can check how many models we've created.

    len(models)\n
    10\n

    We can now pass these models to a SuccessiveHalvingClassifier. We also need to pick a metric to compare the models, and a budget which indicates how many iterations to run before picking the best model and discarding the rest.

    from river import model_selection\n\nsh = model_selection.SuccessiveHalvingClassifier(\n    models,\n    metric=metrics.Accuracy(),\n    budget=2000,\n    eta=2,\n    verbose=True\n)\n

    A SuccessiveHalvingClassifier is also a classifier with a learn_one and a predict_proba_one method. We can therefore evaluate it like any other classifier with evaluate.progressive_val_score.

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    dataset=datasets.Phishing(),\n    model=sh,\n    metric=metrics.ROCAUC()\n)\n
    [1] 5 removed       5 left  50 iterations   budget used: 500        budget left: 1500       best Accuracy: 80.00%\n[2] 2 removed       3 left  100 iterations  budget used: 1000       budget left: 1000       best Accuracy: 84.00%\n[3] 1 removed       2 left  166 iterations  budget used: 1498       budget left: 502        best Accuracy: 86.14%\n[4] 1 removed       1 left  250 iterations  budget used: 1998       budget left: 2  best Accuracy: 84.80%\nROCAUC: 95.22%\n

    We can now view the best model.

    sh.best_model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LogisticRegression (\n    optimizer=Adam (\n      lr=Constant (\n        learning_rate=0.01\n      )\n      beta_1=0.01\n      beta_2=0.999\n      eps=1e-08\n    )\n    loss=Log (\n      weight_pos=1.\n      weight_neg=1.\n    )\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    "},{"location":"api/model-selection/SuccessiveHalvingClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    1. Jamieson, K. and Talwalkar, A., 2016, May. Non-stochastic best arm identification and hyperparameter optimization. In Artificial Intelligence and Statistics (pp. 240-248). \u21a9

    2. Li, L., Jamieson, K., Rostamizadeh, A., Gonina, E., Hardt, M., Recht, B. and Talwalkar, A., 2018. Massively parallel hyperparameter tuning. arXiv preprint arXiv:1810.05934. \u21a9

    3. Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A., 2017. Hyperband: A novel bandit-based approach to hyperparameter optimization. The Journal of Machine Learning Research, 18(1), pp.6765-6816. \u21a9

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/","title":"SuccessiveHalvingRegressor","text":"

    Successive halving algorithm for regression.

    Successive halving is a method for performing model selection without having to train each model on the whole dataset. At certain points in time (called \"rungs\"), the worst performing models are discarded and the best ones keep competing with each other. The rung values are designed so that at most budget model updates will be performed in total.

    If you have k combinations of hyperparameters and your dataset contains n observations, then the maximal budget you can allocate is:

    \[\frac{2kn}{\eta}\]

    It is recommended that you check this beforehand. This bound can't be checked by the function because the size of the dataset is not known in advance. In fact, the stream is potentially infinite, in which case the algorithm will simply terminate once all the budget has been spent.

    If you have a budget of B and your dataset contains n observations, then the number of hyperparameter combinations that will spend all the budget and go through all the data is:

    \\[\\left\\lceil\\left\\lfloor\\frac{B}{2n}\\right\\rfloor \\times eta \\right\\rceil\\]"},{"location":"api/model-selection/SuccessiveHalvingRegressor/#parameters","title":"Parameters","text":"
    • models

      The models to compare.

    • metric

      Type \u2192 metrics.base.Metric

      Metric used for comparing models with.

    • budget

      Type \u2192 int

      Total number of model updates you wish to allocate.

    • eta

      Default \u2192 2

    Rate of elimination. At every rung, math.ceil(k / eta) models are kept, where k is the number of models that have reached the rung. A higher eta value will focus on fewer models but will allocate more iterations to the best models.

    • verbose

      Default \u2192 False

      Whether to display progress or not.

    • print_kwargs

      Extra keyword arguments are passed to the print function. For instance, this allows providing a file argument, which indicates where to output progress.

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#examples","title":"Examples","text":"

    As an example, let's use successive halving to tune the optimizer of a linear regression. We'll first define the model.

    from river import linear_model\nfrom river import preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(intercept_lr=.1)\n)\n

    Let's now define a grid of parameters which we would like to compare. We'll try different optimizers with various learning rates.

    from river import optim\nfrom river import utils\n\nmodels = utils.expand_param_grid(model, {\n    'LinearRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01, .005]}),\n            (optim.Adam, {'beta_1': [.01, .001], 'lr': [.1, .01, .001]}),\n            (optim.Adam, {'beta_1': [.1], 'lr': [.001]}),\n        ]\n    }\n})\n

    We can check how many models we've created.

    len(models)\n
    10\n

    We can now pass these models to a SuccessiveHalvingRegressor. We also need to pick a metric to compare the models, and a budget which indicates how many iterations to run before picking the best model and discarding the rest.

    from river import model_selection\n\nsh = model_selection.SuccessiveHalvingRegressor(\n    models,\n    metric=metrics.MAE(),\n    budget=2000,\n    eta=2,\n    verbose=True\n)\n

    A SuccessiveHalvingRegressor is also a regressor with a learn_one and a predict_one method. We can therefore evaluate it like any other regressor with evaluate.progressive_val_score.

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\n\nevaluate.progressive_val_score(\n    dataset=datasets.TrumpApproval(),\n    model=sh,\n    metric=metrics.MAE()\n)\n
    [1] 5 removed       5 left  50 iterations   budget used: 500        budget left: 1500       best MAE: 4.419643\n[2] 2 removed       3 left  100 iterations  budget used: 1000       budget left: 1000       best MAE: 2.392266\n[3] 1 removed       2 left  166 iterations  budget used: 1498       budget left: 502        best MAE: 1.541383\n[4] 1 removed       1 left  250 iterations  budget used: 1998       budget left: 2  best MAE: 1.112122\nMAE: 0.490688\n

    We can now view the best model.

    sh.best_model\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=Adam (\n      lr=Constant (\n        learning_rate=0.1\n      )\n      beta_1=0.01\n      beta_2=0.999\n      eps=1e-08\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.1\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    "},{"location":"api/model-selection/SuccessiveHalvingRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    1. Jamieson, K. and Talwalkar, A., 2016, May. Non-stochastic best arm identification and hyperparameter optimization. In Artificial Intelligence and Statistics (pp. 240-248). \u21a9

    2. Li, L., Jamieson, K., Rostamizadeh, A., Gonina, E., Hardt, M., Recht, B. and Talwalkar, A., 2018. Massively parallel hyperparameter tuning. arXiv preprint arXiv:1810.05934. \u21a9

    3. Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A., 2017. Hyperband: A novel bandit-based approach to hyperparameter optimization. The Journal of Machine Learning Research, 18(1), pp.6765-6816. \u21a9

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/","title":"ModelSelectionClassifier","text":"

    A model selector for classification.

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[base.Estimator]

    • metric

      Type \u2192 metrics.base.Metric

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/base/ModelSelectionClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/","title":"ModelSelectionRegressor","text":"

    A model selector for regression.

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#parameters","title":"Parameters","text":"
    • models

      Type \u2192 Iterator[base.Estimator]

    • metric

      Type \u2192 metrics.base.Metric

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#attributes","title":"Attributes","text":"
    • best_model

      The current best model.

    • models

    "},{"location":"api/model-selection/base/ModelSelectionRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/multiclass/OneVsOneClassifier/","title":"OneVsOneClassifier","text":"

    One-vs-One (OvO) multiclass strategy.

    This strategy consists in fitting one binary classifier for each pair of classes. Because we are in a streaming context, the number of classes isn't known from the start, hence new classifiers are instantiated on the fly.

    The number of classifiers is k * (k - 1) / 2, where k is the number of classes. However, each call to learn_one only requires training k - 1 models. Indeed, only the models that pertain to the given label have to be trained. Meanwhile, making a prediction requires going through each and every model.
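
    As a quick sanity check of these counts, here is a small illustrative snippet (standard library only) that enumerates the classifier pairs for a hypothetical set of classes:

    import itertools

    classes = ['a', 'b', 'c', 'd']  # k = 4 classes seen so far (hypothetical)

    # One binary classifier per unordered pair: k * (k - 1) / 2 = 6 pairs.
    pairs = list(itertools.combinations(sorted(classes), 2))
    print(len(pairs))  # 6

    # A sample labelled 'a' only updates the k - 1 = 3 classifiers
    # whose pair contains 'a'.
    print([pair for pair in pairs if 'a' in pair])  # [('a', 'b'), ('a', 'c'), ('a', 'd')]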

    "},{"location":"api/multiclass/OneVsOneClassifier/#parameters","title":"Parameters","text":"
    • classifier

      A binary classifier, although a multi-class classifier will work too.

    "},{"location":"api/multiclass/OneVsOneClassifier/#attributes","title":"Attributes","text":"
    • classifiers (dict)

      A mapping between pairs of classes and classifiers. The keys are tuples which contain a pair of classes. Each pair is sorted in lexicographical order.

    "},{"location":"api/multiclass/OneVsOneClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\novo = multiclass.OneVsOneClassifier(linear_model.LogisticRegression())\nmodel = scaler | ovo\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 80.76%\n

    "},{"location":"api/multiclass/OneVsOneClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/multiclass/OneVsRestClassifier/","title":"OneVsRestClassifier","text":"

    One-vs-the-rest (OvR) multiclass strategy.

    This strategy consists in fitting one binary classifier per class. Because we are in a streaming context, the number of classes isn't known from the start. Hence, new classifiers are instantiated on the fly. Likewise, the predicted probabilities will only include the classes seen up to a given point in time.

    Note that this classifier supports mini-batches as well as single instances.

    The computational complexity for both learning and predicting grows linearly with the number of classes. If you have a very large number of classes, then you might want to consider using a multiclass.OutputCodeClassifier instead.
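
    A minimal sketch of the one-vs-rest bookkeeping, using clones of a river binary classifier (the structure is illustrative, not river's actual implementation):

    from river import linear_model

    base = linear_model.LogisticRegression()
    binary_clfs = {}  # one binary classifier per class, created lazily

    def learn_one(x, y):
        # Instantiate a fresh classifier the first time a class is seen.
        if y not in binary_clfs:
            binary_clfs[y] = base.clone()
        # Each binary classifier learns "does this sample belong to my class?".
        for label, clf in binary_clfs.items():
            clf.learn_one(x, y == label)

    def predict_proba_one(x):
        # Only classes seen so far receive a probability; a real
        # implementation would normalize these so they sum to one.
        return {
            label: clf.predict_proba_one(x)[True]
            for label, clf in binary_clfs.items()
        }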

    "},{"location":"api/multiclass/OneVsRestClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      A binary classifier, although a multi-class classifier will work too.

    "},{"location":"api/multiclass/OneVsRestClassifier/#attributes","title":"Attributes","text":"
    • classifiers (dict)

      A mapping between classes and classifiers.

    "},{"location":"api/multiclass/OneVsRestClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\novr = multiclass.OneVsRestClassifier(linear_model.LogisticRegression())\nmodel = scaler | ovr\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 77.46%\n

    This estimator also supports mini-batching.

    import pandas as pd\n\nfor X in pd.read_csv(dataset.path, chunksize=64):\n    y = X.pop('category')\n    y_pred = model.predict_many(X)\n    model = model.learn_many(X, y)\n
    "},{"location":"api/multiclass/OneVsRestClassifier/#methods","title":"Methods","text":"learn_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_many predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    "},{"location":"api/multiclass/OutputCodeClassifier/","title":"OutputCodeClassifier","text":"

    Output-code multiclass strategy.

    This is also referred to as \"error-correcting output codes\".

    This class makes it possible to learn a multi-class classification problem with a binary classifier. Each class is converted to a code of 0s and 1s. The length of the code is called the code size. A copy of the classifier is made for each position in the code. The codes associated with the classes are stored in a code book.

    When a new sample arrives, the label's code is retrieved from the code book. Then, each classifier is trained on its part of the code, which is either a 0 or a 1.

    For predicting, each classifier outputs a probability. These are then compared to each code in the code book, and the label which is the \"closest\" is chosen as the most likely class. Closeness is determined in terms of Manhattan distance.

    One specificity of online learning is that we don't know how many classes there are initially. Therefore, a random procedure generates codes on the fly whenever a previously unseen label appears.
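
    To illustrate the decoding step, here is a hypothetical code book with three classes and a code size of 4; the class whose code is closest in Manhattan distance to the vector of per-classifier probabilities wins:

    # Hypothetical code book: one 4-bit code per class.
    code_book = {
        'cat':  [1, 0, 1, 0],
        'dog':  [0, 1, 1, 0],
        'bird': [1, 1, 0, 1],
    }

    # Probabilities output by the 4 binary classifiers for some sample.
    probas = [0.9, 0.2, 0.7, 0.1]

    def manhattan(code, probas):
        return sum(abs(c - p) for c, p in zip(code, probas))

    # 'cat' has the smallest Manhattan distance to the probabilities.
    prediction = min(code_book, key=lambda label: manhattan(code_book[label], probas))
    print(prediction)  # cat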

    "},{"location":"api/multiclass/OutputCodeClassifier/#parameters","title":"Parameters","text":"
    • classifier

      Type \u2192 base.Classifier

      A binary classifier, although a multi-class classifier will work too.

    • code_size

      Type \u2192 int

      The code size, which dictates how many copies of the provided classifiers to train. Must be strictly positive.

    • coding_method

      Type \u2192 str

      Default \u2192 random

      The method used to generate the codes. Can be either 'exact' or 'random'. The 'exact' method generates all possible codes of a given size in memory, and streams them in a random order. The 'random' method generates random codes of a given size on the fly. The 'exact' method necessarily generates different codes for each class, but requires more memory. The 'random' method can generate duplicate codes for different classes, but requires less memory.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      A random seed number that can be set for reproducibility.

    "},{"location":"api/multiclass/OutputCodeClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multiclass\nfrom river import preprocessing\n\ndataset = datasets.ImageSegments()\n\nscaler = preprocessing.StandardScaler()\nooc = multiclass.OutputCodeClassifier(\n    classifier=linear_model.LogisticRegression(),\n    code_size=10,\n    coding_method='random',\n    seed=1\n)\nmodel = scaler | ooc\n\nmetric = metrics.MacroF1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MacroF1: 79.58%\n

    "},{"location":"api/multiclass/OutputCodeClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Dietterich, T.G. and Bakiri, G., 1994. Solving multiclass learning problems via error-correcting output codes. Journal of artificial intelligence research, 2, pp.263-286. \u21a9

    2. James, G. and Hastie, T., 1998. The error coding method and PICTs. Journal of Computational and Graphical statistics, 7(3), pp.377-387. \u21a9

    "},{"location":"api/multioutput/ClassifierChain/","title":"ClassifierChain","text":"

    A multi-output model that arranges classifiers into a chain.

    This will create one model per output. The prediction of the first output will be used as a feature in the second model. The prediction for the second output will be used as a feature for the third model, etc. This \"chain model\" is therefore capable of capturing dependencies between outputs.
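
    A schematic view of the chaining logic, in plain Python rather than river's actual implementation (names are hypothetical):

    from river import linear_model

    order = ['label_1', 'label_2', 'label_3']  # hypothetical target order
    chain = {label: linear_model.LogisticRegression() for label in order}

    def predict_one(x):
        x = dict(x)  # don't mutate the caller's features
        y_pred = {}
        for label in order:
            y_pred[label] = chain[label].predict_one(x)
            # The prediction becomes an extra feature for the next link.
            x[label] = y_pred[label]
        return y_pred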

    "},{"location":"api/multioutput/ClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      A classifier model used for each label.

    • order

      Type \u2192 list | None

      Default \u2192 None

      A list with the targets order in which to construct the chain. If None then the order will be inferred from the order of the keys in the target.

    "},{"location":"api/multioutput/ClassifierChain/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river import stream\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.fetch_openml('yeast', version=4, parser='auto', as_frame=False),\n    shuffle=True,\n    seed=42\n)\n\nmodel = feature_selection.VarianceThreshold(threshold=0.01)\nmodel |= preprocessing.StandardScaler()\nmodel |= multioutput.ClassifierChain(\n    model=linear_model.LogisticRegression(),\n    order=list(range(14))\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n    # Convert y values to booleans\n    y = {i: yi == 'TRUE' for i, yi in y.items()}\n    y_pred = model.predict_one(x)\n    metric = metric.update(y, y_pred)\n    model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 41.81%\n

    "},{"location":"api/multioutput/ClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing, given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Multi-Output Chain Models and their Application in Data Streams \u21a9

    "},{"location":"api/multioutput/MonteCarloClassifierChain/","title":"MonteCarloClassifierChain","text":"

    Monte Carlo Sampling Classifier Chains.

    Probabilistic Classifier Chains using Monte Carlo sampling, as described in 1.

    m samples are taken from the posterior distribution. We therefore need a probabilistic interpretation of the output; thus, this is a particular variety of ProbabilisticClassifierChain.
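
    A rough sketch of the Monte Carlo idea, assuming some callable that walks the chain and yields, for each label, the probability of it being true given the features and the labels sampled so far (everything here is illustrative):

    import random

    def sample_path(chain_probas, x, rng):
        """Draw one label vector by sampling each label in chain order."""
        y, joint = {}, 1.0
        for label, p_true in chain_probas(x, y):
            y[label] = rng.random() < p_true
            joint *= p_true if y[label] else 1.0 - p_true
        return y, joint

    def predict_one(chain_probas, x, m=10, seed=42):
        rng = random.Random(seed)
        samples = [sample_path(chain_probas, x, rng) for _ in range(m)]
        # Keep the sampled label vector with the highest joint probability.
        best, _ = max(samples, key=lambda pair: pair[1])
        return best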

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

    • m

      Type \u2192 int

      Default \u2192 10

      Number of samples to take from the posterior distribution.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#examples","title":"Examples","text":"

    from river import feature_selection\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.MonteCarloClassifierChain(\n    model=linear_model.LogisticRegression(),\n    m=10,\n    seed=42\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 51.79%\n

    "},{"location":"api/multioutput/MonteCarloClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing, given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Read, J., Martino, L., & Luengo, D. (2014). Efficient monte carlo methods for multi-dimensional learning with classifier chains. Pattern Recognition, 47(3), 1535-1546.\u00a0\u21a9

    "},{"location":"api/multioutput/MultiClassEncoder/","title":"MultiClassEncoder","text":"

    Convert a multi-label task into multiclass.

    Assigns a class to each unique combination of labels, and proceeds with training the supplied multi-class classifier.

    The transformation is done by converting the label set, which could be seen as a binary number, into an integer representing a class. At prediction time, the predicted integer is converted back to a binary number which is the predicted label set.
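
    For example, treating the label set as the bits of a binary number (a hypothetical three-label task):

    labels = ['a', 'b', 'c']  # fixed label order (hypothetical)

    def encode(y):
        # {'a': True, 'b': False, 'c': True} -> 0b101 -> 5
        return sum(int(y[label]) << i for i, label in enumerate(labels))

    def decode(code):
        # 5 -> 0b101 -> {'a': True, 'b': False, 'c': True}
        return {label: bool(code >> i & 1) for i, label in enumerate(labels)}

    print(encode({'a': True, 'b': False, 'c': True}))  # 5
    print(decode(5))  # {'a': True, 'b': False, 'c': True}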

    "},{"location":"api/multioutput/MultiClassEncoder/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

      The classifier used for learning.

    "},{"location":"api/multioutput/MultiClassEncoder/#examples","title":"Examples","text":"

    from river import forest\nfrom river import metrics\nfrom river import multioutput\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.MultiClassEncoder(\n    model=forest.ARFClassifier(seed=7)\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 95.10%\n

    "},{"location":"api/multioutput/MultiClassEncoder/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'dict[FeatureName, bool]'

    Returns

    MultiLabelClassifier: self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing, given a dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, dict[bool, float]]: A dictionary that associates a probability with each label.

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/","title":"ProbabilisticClassifierChain","text":"

    Probabilistic Classifier Chains.

    The Probabilistic Classifier Chains (PCC) 1 is a Bayes-optimal method based on the Classifier Chains (CC).

    Consider the concept of chaining classifiers as searching a path in a binary tree whose leaf nodes are associated with a label \\(y \\in Y\\). While CC searches only a single path in the aforementioned binary tree, PCC looks at each of the \\(2^l\\) paths, where \\(l\\) is the number of labels. This limits the applicability of the method to data sets with a small to moderate number of labels. The authors recommend no more than about 15 labels for real-world applications.
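
    The combinatorics are easy to verify: with \(l\) labels there are \(2^l\) candidate label vectors, so \(l = 15\) already means \(2^{15} = 32768\) paths to score per prediction. A toy enumeration, where the joint probability function p is assumed to be given:

    import itertools

    l = 3  # number of labels (toy value)
    paths = list(itertools.product([False, True], repeat=l))
    print(len(paths))  # 2 ** 3 == 8

    # PCC scores every path and keeps the argmax:
    # best = max(paths, key=lambda y: p(y, x))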

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Classifier

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#examples","title":"Examples","text":"

    from river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river.datasets import synth\n\ndataset = synth.Logical(seed=42, n_tiles=100)\n\nmodel = multioutput.ProbabilisticClassifierChain(\n    model=linear_model.LogisticRegression()\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.Jaccard())\n\nfor x, y in dataset:\n   y_pred = model.predict_one(x)\n   y_pred = {k: y_pred.get(k, 0) for k in y}\n   metric = metric.update(y, y_pred)\n   model = model.learn_one(x, y)\n\nmetric\n
    MicroAverage(Jaccard): 51.84%\n

    "},{"location":"api/multioutput/ProbabilisticClassifierChain/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and the labels y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the labels of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[FeatureName, bool]: The predicted labels.

    predict_proba_one

    Predict the probability of each label appearing, given a dictionary of features x.

    Parameters

    • x
    • kwargs

    Returns

    A dictionary that associates a probability with each label.

    1. Cheng, W., H\u00fcllermeier, E., & Dembczynski, K. J. (2010). Bayes optimal multilabel classification via probabilistic classifier chains. In Proceedings of the 27th international conference on machine learning (ICML-10) (pp. 279-286).\u00a0\u21a9

    "},{"location":"api/multioutput/RegressorChain/","title":"RegressorChain","text":"

    A multi-output model that arranges regressors into a chain.

    This will create one model per output. The prediction of the first output will be used as a feature in the second model. The prediction for the second output will be used as a feature for the third model, etc. This \"chain model\" is therefore capable of capturing dependencies between outputs.

    "},{"location":"api/multioutput/RegressorChain/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Regressor

      The regression model used to make predictions for each target.

    • order

      Type \u2192 list | None

      Default \u2192 None

      A list with the targets order in which to construct the chain. If None then the order will be inferred from the order of the keys in the target.

    "},{"location":"api/multioutput/RegressorChain/#examples","title":"Examples","text":"

    from river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import multioutput\nfrom river import preprocessing\nfrom river import stream\n\nfrom sklearn import datasets\n\ndataset = stream.iter_sklearn_dataset(\n    dataset=datasets.load_linnerud(),\n    shuffle=True,\n    seed=42\n)\n\nmodel = multioutput.RegressorChain(\n    model=(\n        preprocessing.StandardScaler() |\n        linear_model.LinearRegression(intercept_lr=0.3)\n    ),\n    order=[0, 1, 2]\n)\n\nmetric = metrics.multioutput.MicroAverage(metrics.MAE())\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MicroAverage(MAE): 12.733525\n

    "},{"location":"api/multioutput/RegressorChain/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the outputs of features x.

    Parameters

    • x
    • kwargs

    Returns

    The predictions.

    "},{"location":"api/naive-bayes/BernoulliNB/","title":"BernoulliNB","text":"

    Bernoulli Naive Bayes.

    The Bernoulli Naive Bayes model learns from co-occurrences between features, such as word counts, and discrete classes. The input vector must contain positive values, such as counts or TF-IDF values.
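
    For reference, the standard Bernoulli likelihood this family of models relies on, with binarized features \(x_f \in \{0, 1\}\) and smoothing parameter \(\alpha\), is (the notation here is ours, not river's):

    \[P(x \mid c) = \prod_f p_{cf}^{x_f} (1 - p_{cf})^{1 - x_f}, \qquad p_{cf} = \frac{N_{cf} + \alpha}{N_c + 2\alpha}\]

    where \(N_{cf}\) counts the observations of class \(c\) in which feature \(f\) is present and \(N_c\) counts the observations of class \(c\).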

    "},{"location":"api/naive-bayes/BernoulliNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).

    • true_threshold

      Default \u2192 0.0

      Threshold for binarizing (mapping to booleans) features.

    "},{"location":"api/naive-bayes/BernoulliNB/#attributes","title":"Attributes","text":"
    • class_counts (collections.Counter)

      Number of times each class has been seen.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    "},{"location":"api/naive-bayes/BernoulliNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"yes\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.BernoulliNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.75\n
    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.8831539823829913, 'no': 0.11684601761700895}\n

    model.predict_one(\"test\")\n
    'yes'\n

    You can train the model and make predictions in mini-batch mode using the class methods learn_many and predict_many.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"yes\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.BernoulliNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
             no       yes\n0  0.116846  0.883154\n1  0.047269  0.952731\n

    model.predict_many(unseen)\n
    0    yes\n1    yes\ndtype: object\n

    "},{"location":"api/naive-bayes/BernoulliNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many p_feature_given_class predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. The Bernoulli model \u21a9

    "},{"location":"api/naive-bayes/ComplementNB/","title":"ComplementNB","text":"

    Naive Bayes classifier for multinomial models.

    The Complement Naive Bayes model learns from co-occurrences between features, such as word counts, and discrete classes. ComplementNB is suitable for imbalanced datasets. The input vector must contain positive values, such as counts or TF-IDF values.
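
    The distinguishing idea, following Rennie et al. 1, is to estimate each feature's weight from the complement of a class, i.e. from all classes other than \(c\) (the notation below is ours):

    \[\hat{\theta}_{cf} = \frac{\alpha + \sum_{c' \neq c} N_{c'f}}{\alpha n + \sum_{c' \neq c} N_{c'}}\]

    where \(N_{c'f}\) is the total count of feature \(f\) in class \(c'\), \(N_{c'}\) the total count over all features in class \(c'\), and \(n\) the number of features. Basing the scores on complement counts is what makes the model robust to class imbalance.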

    "},{"location":"api/naive-bayes/ComplementNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).

    "},{"location":"api/naive-bayes/ComplementNB/#attributes","title":"Attributes","text":"
    • class_dist (proba.Multinomial)

      Class prior probability distribution.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    • class_totals (collections.Counter)

      Total frequencies per class.

    "},{"location":"api/naive-bayes/ComplementNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"maybe\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.ComplementNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.5\n

    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model[\"nb\"].p_class(\"maybe\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.275, 'maybe': 0.375, 'no': 0.35}\n

    model.predict_one(\"test\")\n
    'maybe'\n

    You can train the model and make predictions in mini-batch mode using the class methods learn_many and predict_many.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"maybe\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.ComplementNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
          maybe        no       yes\n0  0.415129  0.361624  0.223247\n1  0.248619  0.216575  0.534807\n

    model.predict_many(unseen)\n
    0    maybe\n1      yes\ndtype: object\n

    "},{"location":"api/naive-bayes/ComplementNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. Rennie, J.D., Shih, L., Teevan, J. and Karger, D.R., 2003. Tackling the poor assumptions of naive bayes text classifiers. In Proceedings of the 20th international conference on machine learning (ICML-03) (pp. 616-623) \u21a9

    2. StackExchange discussion \u21a9

    "},{"location":"api/naive-bayes/GaussianNB/","title":"GaussianNB","text":"

    Gaussian Naive Bayes.

    A Gaussian distribution \(G_{cf}\) is maintained for each class \(c\) and each feature \(f\). Each Gaussian is updated using the amount associated with each feature; the details can be found in proba.Gaussian. The joint log-likelihood is then obtained by summing the log probabilities of each feature associated with each class.
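
    Concretely, the joint log-likelihood used for a class \(c\) is (our notation):

    \[\log P(c) + \sum_f \log \mathcal{N}(x_f \mid \mu_{cf}, \sigma_{cf}^2)\]

    and predict_proba_one normalizes the exponentials of these per-class scores.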

    "},{"location":"api/naive-bayes/GaussianNB/#examples","title":"Examples","text":"

    from river import naive_bayes\nfrom river import stream\nimport numpy as np\n\nX = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\nY = np.array([1, 1, 1, 2, 2, 2])\n\nmodel = naive_bayes.GaussianNB()\n\nfor x, y in stream.iter_array(X, Y):\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({0: -0.8, 1: -1})\n
    1\n

    "},{"location":"api/naive-bayes/GaussianNB/#methods","title":"Methods","text":"joint_log_likelihood joint_log_likelihood_many learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    "},{"location":"api/naive-bayes/MultinomialNB/","title":"MultinomialNB","text":"

    Naive Bayes classifier for multinomial models.

    The Multinomial Naive Bayes model learns from co-occurrences between features, such as word counts, and discrete classes. The input vector must contain positive values, such as counts or TF-IDF values.
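
    The underlying smoothed estimate of each feature's probability given a class is the textbook one (our notation):

    \[P(f \mid c) = \frac{N_{cf} + \alpha}{N_c + \alpha n}\]

    with \(N_{cf}\) the total count of feature \(f\) in class \(c\), \(N_c\) the total count over all features in class \(c\), and \(n\) the number of distinct features seen so far.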

    "},{"location":"api/naive-bayes/MultinomialNB/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 1.0

      Additive (Laplace/Lidstone) smoothing parameter (use 0 for no smoothing).

    "},{"location":"api/naive-bayes/MultinomialNB/#attributes","title":"Attributes","text":"
    • class_dist (proba.Multinomial)

      Class prior probability distribution.

    • feature_counts (collections.defaultdict)

      Total frequencies per feature and class.

    • class_totals (collections.Counter)

      Total frequencies per class.

    "},{"location":"api/naive-bayes/MultinomialNB/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import compose\nfrom river import feature_extraction\nfrom river import naive_bayes\n\ndocs = [\n    (\"Chinese Beijing Chinese\", \"yes\"),\n    (\"Chinese Chinese Shanghai\", \"yes\"),\n    (\"Chinese Macao\", \"maybe\"),\n    (\"Tokyo Japan Chinese\", \"no\")\n]\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.MultinomialNB(alpha=1))\n)\n\nfor sentence, label in docs:\n    model = model.learn_one(sentence, label)\n\nmodel[\"nb\"].p_class(\"yes\")\n
    0.5\n

    model[\"nb\"].p_class(\"no\")\n
    0.25\n

    model[\"nb\"].p_class(\"maybe\")\n
    0.25\n

    model.predict_proba_one(\"test\")\n
    {'yes': 0.413, 'maybe': 0.310, 'no': 0.275}\n

    model.predict_one(\"test\")\n
    'yes'\n

    You can train the model and make predictions in mini-batch mode using the class methods learn_many and predict_many.

    df_docs = pd.DataFrame(docs, columns = [\"docs\", \"y\"])\n\nX = pd.Series([\n   \"Chinese Beijing Chinese\",\n   \"Chinese Chinese Shanghai\",\n   \"Chinese Macao\",\n   \"Tokyo Japan Chinese\"\n])\n\ny = pd.Series([\"yes\", \"yes\", \"maybe\", \"no\"])\n\nmodel = compose.Pipeline(\n    (\"tokenize\", feature_extraction.BagOfWords(lowercase=False)),\n    (\"nb\", naive_bayes.MultinomialNB(alpha=1))\n)\n\nmodel = model.learn_many(X, y)\n\nunseen = pd.Series([\"Taiwanese Taipei\", \"Chinese Shanghai\"])\n\nmodel.predict_proba_many(unseen)\n
          maybe        no       yes\n0  0.373272  0.294931  0.331797\n1  0.160396  0.126733  0.712871\n

    model.predict_many(unseen)\n
    0    maybe\n1      yes\ndtype: object\n

    "},{"location":"api/naive-bayes/MultinomialNB/#methods","title":"Methods","text":"joint_log_likelihood

    Computes the joint log likelihood of input features.

    Parameters

    • x \u2014 'dict'

    Returns

    float: Mapping between classes and joint log likelihood.

    joint_log_likelihood_many

    Computes the joint log likelihood of input features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: Input samples joint log likelihood.

    learn_many

    Learn from a batch of count vectors.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.Series'

    Returns

    MiniBatchClassifier: self

    learn_one

    Updates the model with a single observation.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    p_class p_class_many p_feature_given_class predict_many

    Predict the outcome for each given sample.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.Series: The predicted labels.

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_many

    Return probabilities using the log-likelihoods in a mini-batch setting.

    Parameters

    • X \u2014 'pd.DataFrame'

    predict_proba_one

    Return probabilities using the log-likelihoods.

    Parameters

    • x \u2014 'dict'

    1. Naive Bayes text classification \u21a9

    "},{"location":"api/neighbors/KNNClassifier/","title":"KNNClassifier","text":"

    K-Nearest Neighbors (KNN) for classification.

    Samples are stored using a first-in, first-out strategy. The strategy to perform search queries in the data buffer is defined by the engine parameter.

    "},{"location":"api/neighbors/KNNClassifier/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type \u2192 int

      Default \u2192 5

      The number of nearest neighbors to search for.

    • engine

      Type \u2192 BaseNN | None

      Default \u2192 None

    The search engine used to store the instances and perform search queries. Depending on the chosen engine, the search will be exact or approximate. Please consult the documentation of each available search engine for more details on its usage. By default, the SWINN search engine is used for approximate search queries.

    • weighted

      Type \u2192 bool

      Default \u2192 True

    Weight the contribution of each neighbor by its inverse distance.

    • cleanup_every

      Type \u2192 int

      Default \u2192 0

      This determines at which rate old classes are cleaned up. Classes that have been seen in the past but that are not present in the current window are dropped. Classes are never dropped when this is set to 0.

    • softmax

      Type \u2192 bool

      Default \u2192 False

      Whether or not to use softmax normalization to normalize the neighbors contributions. Votes are divided by the total number of votes if this is False.

    "},{"location":"api/neighbors/KNNClassifier/#examples","title":"Examples","text":"
    import functools\nfrom river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import neighbors\nfrom river import preprocessing\nfrom river import utils\n\ndataset = datasets.Phishing()\n

    To select a custom distance metric which takes one or several parameter, you can wrap your chosen distance using functools.partial:

    l1_dist = functools.partial(utils.math.minkowski_distance, p=1)\n\nmodel = (\n    preprocessing.StandardScaler() |\n    neighbors.KNNClassifier(\n        engine=neighbors.SWINN(\n            dist_func=l1_dist,\n            seed=42\n        )\n    )\n)\n\nevaluate.progressive_val_score(dataset, model, metrics.Accuracy())\n
    Accuracy: 89.67%\n

    "},{"location":"api/neighbors/KNNClassifier/#methods","title":"Methods","text":"clean_up_classes

    Clean up classes added to the window.

    Classes that are added (and removed) from the window may no longer be valid. This method cleans up the window and ensures that only known classes are kept; \"None\" is not considered a class. It is called every cleanup_every steps, or can be called manually.

    learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    "},{"location":"api/neighbors/KNNClassifier/#notes","title":"Notes","text":"

    Note that since the window is moving and we keep track of all classes that are added at some point, a class might be returned in a result (with a value of 0) if it is no longer in the window. To avoid this, you can call model.clean_up_classes() or set cleanup_every to a non-zero value.

    "},{"location":"api/neighbors/KNNRegressor/","title":"KNNRegressor","text":"

    K-Nearest Neighbors regressor.

    Samples are stored using a first-in, first-out strategy. The strategy to perform search queries in the data buffer is defined by the engine parameter. Predictions are obtained by aggregating the values of the closest n_neighbors stored samples with respect to a query sample.
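
    A small sketch of the three aggregation options, given hypothetical (distance, target) pairs for the retrieved neighbors:

    import statistics

    # Hypothetical (distance, target) pairs returned by the search engine.
    neighbors_found = [(0.5, 10.0), (1.0, 12.0), (2.0, 20.0)]

    dists = [d for d, _ in neighbors_found]
    targets = [y for _, y in neighbors_found]

    mean = statistics.mean(targets)      # 14.0
    median = statistics.median(targets)  # 12.0

    # 'weighted_mean' weighs each neighbor by its inverse distance,
    # so closer neighbors count for more.
    weights = [1 / d for d in dists]
    weighted_mean = sum(w * y for w, y in zip(weights, targets)) / sum(weights)  # 12.0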

    "},{"location":"api/neighbors/KNNRegressor/#parameters","title":"Parameters","text":"
    • n_neighbors

      Type \u2192 int

      Default \u2192 5

      The number of nearest neighbors to search for.

    • engine

      Type \u2192 BaseNN | None

      Default \u2192 None

    The search engine used to store the instances and perform search queries. Depending on the chosen engine, the search will be exact or approximate. Please consult the documentation of each available search engine for more details on its usage. By default, the SWINN search engine is used for approximate search queries.

    • aggregation_method

      Type \u2192 str

      Default \u2192 mean

    The method used to aggregate the target values of the neighbors: 'mean', 'median', or 'weighted_mean'.

    "},{"location":"api/neighbors/KNNRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import neighbors\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = neighbors.KNNRegressor()\nevaluate.progressive_val_score(dataset, model, metrics.RMSE())\n
    RMSE: 1.427743\n

    "},{"location":"api/neighbors/KNNRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/neighbors/LazySearch/","title":"LazySearch","text":"

    Exact nearest neighbors using a lazy search strategy.

    "},{"location":"api/neighbors/LazySearch/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Default \u2192 50

    Size of the sliding window used to search for neighbors.

    • min_distance_keep

      Type \u2192 float

      Default \u2192 0.0

      The minimum distance (similarity) to consider adding a point to the window. E.g., a value of 0.0 will add even exact duplicates.

    • dist_func

      Type \u2192 DistanceFunc | FunctionWrapper | None

      Default \u2192 None

      A distance function which accepts two input items to compare. If not set, use the Minkowski distance with p=2.

    "},{"location":"api/neighbors/LazySearch/#methods","title":"Methods","text":"append

    Add a point to the window, optionally with extra metadata.

    Parameters

    • item \u2014 'typing.Any'
    • extra \u2014 'typing.Any | None' \u2014 defaults to None
    • kwargs

    search

    Find the n_neighbors closest points to item, along with their distances.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int'
    • kwargs

    update

    Update the window with a new point, which is only added if it is farther than the minimum distance from existing points.

    If min distance is 0, we do not need to do the calculation. The item (and extra metadata) will not be added to the window if it is too close to an existing point.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int' \u2014 defaults to 1
    • extra \u2014 'typing.Any | None' \u2014 defaults to None

    Returns

    A boolean (true/false) to indicate if the point was added.

    "},{"location":"api/neighbors/LazySearch/#notes","title":"Notes","text":"

    Updates are by default stored by the FIFO (first in first out) method, which means that when the size limit is reached, old samples are dumped to give room for new samples. This is circular, meaning that older points are dumped first. This also gives the implementation a temporal aspect, because older samples are replaced with newer ones.

    The parameter min_distance_keep controls the addition of new items to the window: items that are far enough away (> min_distance_keep) are added. Thus, a value of 0 indicates that we add all points, and increasing it from 0 makes it less likely that a new item will be kept.
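
    A short usage sketch based only on the parameters documented above; the values are illustrative:

    from river import neighbors

    # Keep a point only if it is at least 0.5 away from everything stored,
    # so near-duplicates are not buffered.
    engine = neighbors.LazySearch(window_size=50, min_distance_keep=0.5)

    # The engine can then be plugged into a nearest-neighbor model.
    model = neighbors.KNNClassifier(engine=engine)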

    "},{"location":"api/neighbors/SWINN/","title":"SWINN","text":"

    Sliding WIndow-based Nearest Neighbor (SWINN) search using Graphs.

    Extends the NNDescent algorithm1 to handle vertex addition and removal in a FIFO data ingestion policy. SWINN builds and keeps a directed graph where edges connect the nearest neighbors. Any distance metric can be used to build the graph. By using a directed graph, the user must set the desired number of neighbors. More neighbors imply more accurate search queries at the cost of increased running time and memory usage. Note that although the number of directed neighbors is limited by the user, there is no direct control on the number of reverse neighbors, i.e., the number of vertices that have an edge to a given vertex.

    The basic idea of SWINN and NNDescent is that \"the neighbor of my neighbors might as well be my neighbor\". Hence, the connections are constantly revisited to improve the graph structure. The algorithm for creating and maintaining the search graph can be described in general lines as follows:

    • Start with a random neighborhood graph;

    • For each node in the search graph: refine the current neighborhood by checking if there are better neighborhood options among the neighbors of the current neighbors;

    • If the total number of neighborhood changes is smaller than a given stopping criterion, then stop.

    SWINN adds strategies for removing vertices from the search graph and pruning redundant edges. SWINN is more efficient when the selected maxlen is greater than 500. For small-sized data windows, the lazy/exhaustive search, i.e., neighbors.LazySearch, might be a better choice.
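
    As a sketch of how this connects to the rest of the module (the exact wiring is an assumption of this sketch), a configured SWINN instance can be passed as the engine of neighbors.KNNRegressor, whose engine parameter is documented above:

    from river import neighbors\n\nengine = neighbors.SWINN(graph_k=20, maxlen=1000, seed=42)\nmodel = neighbors.KNNRegressor(engine=engine)\n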

    "},{"location":"api/neighbors/SWINN/#parameters","title":"Parameters","text":"
    • graph_k

      Type \u2192 int

      Default \u2192 20

      The maximum number of direct nearest neighbors each node has.

    • dist_func

      Type \u2192 DistanceFunc | FunctionWrapper | None

      Default \u2192 None

      The distance function used to compare two items. If not set, use the Minkowski distance with p=2.

    • maxlen

      Type \u2192 int

      Default \u2192 1000

      The maximum size of the data buffer.

    • warm_up

      Type \u2192 int

      Default \u2192 500

      How many data instances to observe before the search graph is built.

    • max_candidates

      Type \u2192 int

      Default \u2192 None

      The maximum number of vertices to consider when performing local neighborhood joins. If not set, SWINN will use min(50, max(50, self.graph_k)).

    • delta

      Type \u2192 float

      Default \u2192 0.0001

      Early stop parameter for the neighborhood refinement procedure. NNDescent will stop running if the maximum number of iterations is reached or the number of edge changes after an iteration is smaller than or equal to delta * graph_k * n_nodes. In the last expression, n_nodes refers to the number of graph nodes involved in the (local) neighborhood refinement.

    • prune_prob

      Type \u2192 float

      Default \u2192 0.0

      The probability of removing redundant edges. Must be between 0 and 1. If set to zero, no edge will be pruned. When set to one, every potentially redundant edge will be dropped.

    • n_iters

      Type \u2192 int

      Default \u2192 10

      The maximum number of NNDescent iterations to perform to refine the search index.

    • seed

      Type \u2192 int

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/neighbors/SWINN/#methods","title":"Methods","text":"append

    Add a new item to the search index.

    Data is stored using the FIFO strategy. Both the data buffer and the search graph are updated. The addition of a new item will trigger the removal of the oldest item, if the maximum size has been reached. All edges of the removed node are also dropped and safety procedures are applied to ensure its neighbors remain accessible. The addition of a new item also triggers local neighborhood refinement procedures, to ensure the search index is effective and the node degree constraints are met.

    Parameters

    • item \u2014 'typing.Any'
    • kwargs

    connectivity

    Get a list with the size of each connected component in the search graph.

    This metric provides an overview of reachability in the search index by using Kruskal's algorithm to build a forest of connected components. We want our search index to have a single connected component, i.e., the case where we get a list containing a single number which is equal to maxlen. If that is not the case, not every node in the search graph can be reached from any given starting point. You may want to try increasing graph_k to improve connectivity. However, keep in mind the following aspects: 1) computing this metric is a costly operation (\\(O(E\\log V)\\)), where \\(E\\) and \\(V\\) are, respectively, the number of edges and vertices in the search graph; 2) often, connectivity comes at the price of increased computational costs. Tweaking the sample_rate might help in such situations. The best possible scenario is to decrease the value of graph_k while keeping a single connected component.

    Returns

    list[int]: A list of the number of elements in each connected component of the graph.
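
    As a sketch of the intended check (assuming the index has already been filled with append), a single connected component means every stored point is reachable during a search:

    from river import neighbors\n\nengine = neighbors.SWINN(seed=42)\n# ... append items with engine.append(...) ...\nsizes = engine.connectivity()\nif len(sizes) > 1:\n    pass  # not fully connected; consider increasing graph_k\n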

    search

    Search the underlying nearest neighbor graph given a query item.

    If not enough samples have been observed, i.e., the number of stored samples is smaller than warm_up, the search falls back to a brute-force strategy.

    Parameters

    • item \u2014 'typing.Any'
    • n_neighbors \u2014 'int'
    • epsilon \u2014 'float' \u2014 defaults to 0.1
    • kwargs

    Returns

    tuple[list, list]: neighbors, dists

    "},{"location":"api/neighbors/SWINN/#notes","title":"Notes","text":"

    There is an accuracy/speed trade-off between graph_k and sample_rate. To ensure a single connected component, and thus an effective search index, one can increase graph_k. The connectivity method is a helper to determine whether the search index has a single connected component. However, search accuracy might come at the cost of increased memory usage and slower processing. To alleviate that, one can decrease the sample_rate to avoid exploring all the undirected edges of a node during search queries and local graph refinements. Moreover, the edge pruning procedures also help decrease the computational costs. Note that anything that limits the number of explored neighbors or prunes edges might have a negative impact on search accuracy.

    1. Dong, W., Moses, C., & Li, K. (2011, March). Efficient k-nearest neighbor graph construction for generic similarity measures. In Proceedings of the 20th international conference on World wide web (pp. 577-586).\u00a0\u21a9

    "},{"location":"api/neural-net/MLPRegressor/","title":"MLPRegressor","text":"

    Multi-layer Perceptron for regression.

    This model is still a work in progress. Here are some features that still need implementing:

    • learn_one and predict_one just cast the input dict to a single-row dataframe and then call learn_many and predict_many respectively. This is very inefficient.

    • Not all of the optimizers in the optim module can be used, as they are not all vectorised.

    • Emerging and disappearing features are not supported. Each instance/batch has to have the same features.

    • The gradients haven't been numerically checked.

    "},{"location":"api/neural-net/MLPRegressor/#parameters","title":"Parameters","text":"
    • hidden_dims

      The dimensions of the hidden layers. For example, specifying (10, 20) means that there are two hidden layers with 10 and 20 neurons, respectively. Note that the number of layers the network contains is equal to the number of hidden layers plus two (to account for the input and output layers).

    • activations

      The activation functions to use at each layer, including the input and output layers. Therefore you need to specify three activations if you specify one hidden layer.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      Loss function. Defaults to optim.losses.Squared.

    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      Optimizer. Defaults to optim.SGD with the learning rate set to 0.01.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/neural-net/MLPRegressor/#attributes","title":"Attributes","text":"
    • n_layers

      Return the number of layers in the network. The number of layers is equal to the number of hidden layers plus 2. The 2 accounts for the input layer and the output layer.
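
    For instance, with a single hidden layer the attribute evaluates to 3; a short sketch (activations chosen arbitrarily):

    from river import neural_net as nn\n\nmodel = nn.MLPRegressor(\n    hidden_dims=(5,),\n    activations=(\n        nn.activations.ReLU,\n        nn.activations.ReLU,\n        nn.activations.Identity\n    )\n)\nmodel.n_layers\n
    3\n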

    "},{"location":"api/neural-net/MLPRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import neural_net as nn\nfrom river import optim\nfrom river import preprocessing as pp\nfrom river import metrics\n\nmodel = (\n    pp.StandardScaler() |\n    nn.MLPRegressor(\n        hidden_dims=(5,),\n        activations=(\n            nn.activations.ReLU,\n            nn.activations.ReLU,\n            nn.activations.Identity\n        ),\n        optimizer=optim.SGD(1e-3),\n        seed=42\n    )\n)\n\ndataset = datasets.TrumpApproval()\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.580578\n

    You can also use this to process mini-batches of data.

    import pandas as pd\n\nmodel = (\n    pp.StandardScaler() |\n    nn.MLPRegressor(\n        hidden_dims=(10,),\n        activations=(\n            nn.activations.ReLU,\n            nn.activations.ReLU,\n            nn.activations.ReLU\n        ),\n        optimizer=optim.SGD(1e-4),\n        seed=42\n    )\n)\n\ndataset = datasets.TrumpApproval()\nbatch_size = 32\n\nfor epoch in range(10):\n    for xb in pd.read_csv(dataset.path, chunksize=batch_size):\n        yb = xb.pop('five_thirty_eight')\n        y_pred = model.predict_many(xb)\n        model = model.learn_many(xb, yb)\n\nmodel.predict_many(xb)\n
          five_thirty_eight\n992           39.405231\n993           46.447481\n994           42.121865\n995           40.251148\n996           40.836378\n997           40.893153\n998           40.949927\n999           48.416504\n1000          42.077830\n

    "},{"location":"api/neural-net/MLPRegressor/#methods","title":"Methods","text":"call

    Make predictions.

    Parameters

    • X \u2014 'pd.DataFrame'

    learn_many

    Train the network.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 'pd.DataFrame'

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'

    Returns

    Regressor: self

    predict_many

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/neural-net/activations/Identity/","title":"Identity","text":"

    Identity activation function.

    "},{"location":"api/neural-net/activations/Identity/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z

    "},{"location":"api/neural-net/activations/ReLU/","title":"ReLU","text":"

    Rectified Linear Unit (ReLU) activation function.

    "},{"location":"api/neural-net/activations/ReLU/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z
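
    A short sketch, assuming apply and gradient accept numeric arrays and can be called on the class itself, as in the MLPRegressor example where the activation classes are passed uninstantiated:

    import numpy as np\nfrom river import neural_net as nn\n\nz = np.array([-1.0, 0.0, 2.0])\nnn.activations.ReLU.apply(z)     # elementwise max(0, z)\nnn.activations.ReLU.gradient(z)  # 1 where z > 0, else 0\n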

    "},{"location":"api/neural-net/activations/Sigmoid/","title":"Sigmoid","text":"

    Sigmoid activation function.

    "},{"location":"api/neural-net/activations/Sigmoid/#methods","title":"Methods","text":"apply

    Apply the activation function to a layer output z.

    • z

    gradient

    Return the gradient with respect to a layer output z.

    • z

    "},{"location":"api/optim/AMSGrad/","title":"AMSGrad","text":"

    AMSGrad optimizer.

    "},{"location":"api/optim/AMSGrad/#parameters","title":"Parameters","text":"
    • lr

      Type \u2192 int | float | optim.base.Scheduler

      Default \u2192 0.1

      The learning rate.

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    • correct_bias

      Default \u2192 True

    "},{"location":"api/optim/AMSGrad/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    • v_hat (collections.defaultdict)

    "},{"location":"api/optim/AMSGrad/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AMSGrad()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.60%\n

    "},{"location":"api/optim/AMSGrad/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Reddi, S.J., Kale, S. and Kumar, S., 2019. On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237 \u21a9

    "},{"location":"api/optim/AdaBound/","title":"AdaBound","text":"

    AdaBound optimizer.

    "},{"location":"api/optim/AdaBound/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.001

      The learning rate.

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    • gamma

      Default \u2192 0.001

    • final_lr

      Default \u2192 0.1

    "},{"location":"api/optim/AdaBound/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • s (collections.defaultdict)

    "},{"location":"api/optim/AdaBound/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaBound()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.06%\n

    "},{"location":"api/optim/AdaBound/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Luo, L., Xiong, Y., Liu, Y. and Sun, X., 2019. Adaptive gradient methods with dynamic bound of learning rate. arXiv preprint arXiv:1902.09843 \u21a9

    "},{"location":"api/optim/AdaDelta/","title":"AdaDelta","text":"

    AdaDelta optimizer.

    "},{"location":"api/optim/AdaDelta/#parameters","title":"Parameters","text":"
    • rho

      Default \u2192 0.95

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaDelta/#attributes","title":"Attributes","text":"
    • g2 (collections.defaultdict)

    • s2 (collections.defaultdict)

    "},{"location":"api/optim/AdaDelta/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaDelta()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 80.56%\n

    "},{"location":"api/optim/AdaDelta/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Zeiler, M.D., 2012. Adadelta: an adaptive learning rate method. arXiv preprint arXiv:1212.5701. \u21a9

    "},{"location":"api/optim/AdaGrad/","title":"AdaGrad","text":"

    AdaGrad optimizer.

    "},{"location":"api/optim/AdaGrad/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaGrad/#attributes","title":"Attributes","text":"
    • g2 (collections.defaultdict)
    "},{"location":"api/optim/AdaGrad/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaGrad()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 88.01%\n

    "},{"location":"api/optim/AdaGrad/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Duchi, J., Hazan, E. and Singer, Y., 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(Jul), pp.2121-2159. \u21a9

    "},{"location":"api/optim/AdaMax/","title":"AdaMax","text":"

    AdaMax optimizer.

    "},{"location":"api/optim/AdaMax/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/AdaMax/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    "},{"location":"api/optim/AdaMax/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.AdaMax()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.61%\n

    "},{"location":"api/optim/AdaMax/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Kingma, D.P. and Ba, J., 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. \u21a9

    2. Ruder, S., 2016. An overview of gradient descent optimization algorithms. arXiv preprint arXiv:1609.04747. \u21a9

    "},{"location":"api/optim/Adam/","title":"Adam","text":"

    Adam optimizer.

    "},{"location":"api/optim/Adam/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/Adam/#attributes","title":"Attributes","text":"
    • m (collections.defaultdict)

    • v (collections.defaultdict)

    "},{"location":"api/optim/Adam/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Adam()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.52%\n

    "},{"location":"api/optim/Adam/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Kingma, D.P. and Ba, J., 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980. \u21a9

    "},{"location":"api/optim/Averager/","title":"Averager","text":"

    Averaged stochastic gradient descent.

    This is a wrapper that can be applied to any stochastic gradient descent optimiser. Note that this implementation differs from what may be found elsewhere. Essentially, the average of the weights is usually only used at the end of the optimisation, once all the data has been seen. However, in this implementation the optimiser returns the current averaged weights.

    "},{"location":"api/optim/Averager/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer

      An optimizer for which the produced weights will be averaged.

    • start

      Type \u2192 int

      Default \u2192 0

      Indicates the number of iterations to wait before starting the average. Essentially, nothing happens differently before the number of iterations reaches this value.

    "},{"location":"api/optim/Averager/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Averager/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Averager(optim.SGD(0.01), 100)\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.97%\n

    "},{"location":"api/optim/Averager/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Bottou, L., 2010. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010 (pp. 177-186). Physica-Verlag HD. \u21a9

    2. Stochastic Algorithms for One-Pass Learning slides by L\u00e9on Bottou \u21a9

    3. Xu, W., 2011. Towards optimal one pass large scale learning with averaged stochastic gradient descent. arXiv preprint arXiv:1107.2490. \u21a9

    "},{"location":"api/optim/FTRLProximal/","title":"FTRLProximal","text":"

    FTRL-Proximal optimizer.

    "},{"location":"api/optim/FTRLProximal/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 0.05

    • beta

      Default \u2192 1.0

    • l1

      Default \u2192 0.0

    • l2

      Default \u2192 1.0

    "},{"location":"api/optim/FTRLProximal/#attributes","title":"Attributes","text":"
    • z (collections.defaultdict)

    • n (collections.defaultdict)

    "},{"location":"api/optim/FTRLProximal/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.FTRLProximal()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.56%\n

    "},{"location":"api/optim/FTRLProximal/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. McMahan, H.B., Holt, G., Sculley, D., Young, M., Ebner, D., Grady, J., Nie, L., Phillips, T., Davydov, E., Golovin, D. and Chikkerur, S., 2013, August. Ad click prediction: a view from the trenches. In Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 1222-1230) \u21a9

    2. Tensorflow's FtrlOptimizer \u21a9

    "},{"location":"api/optim/Momentum/","title":"Momentum","text":"

    Momentum optimizer.

    "},{"location":"api/optim/Momentum/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    "},{"location":"api/optim/Momentum/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Momentum/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Momentum()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 84.09%\n

    "},{"location":"api/optim/Momentum/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    "},{"location":"api/optim/Nadam/","title":"Nadam","text":"

    Nadam optimizer.

    "},{"location":"api/optim/Nadam/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • beta_1

      Default \u2192 0.9

    • beta_2

      Default \u2192 0.999

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/Nadam/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/Nadam/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.Nadam()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 86.60%\n

    "},{"location":"api/optim/Nadam/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Nadam: A combination of adam and nesterov \u21a9

    "},{"location":"api/optim/NesterovMomentum/","title":"NesterovMomentum","text":"

    Nesterov Momentum optimizer.

    "},{"location":"api/optim/NesterovMomentum/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    "},{"location":"api/optim/NesterovMomentum/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/NesterovMomentum/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.NesterovMomentum()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 84.22%\n

    "},{"location":"api/optim/NesterovMomentum/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    "},{"location":"api/optim/RMSProp/","title":"RMSProp","text":"

    RMSProp optimizer.

    "},{"location":"api/optim/RMSProp/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.1

    • rho

      Default \u2192 0.9

    • eps

      Default \u2192 1e-08

    "},{"location":"api/optim/RMSProp/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/RMSProp/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.RMSProp()\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.24%\n

    "},{"location":"api/optim/RMSProp/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Divide the gradient by a running average of its recent magnitude \u21a9

    "},{"location":"api/optim/SGD/","title":"SGD","text":"

    Plain stochastic gradient descent.

    "},{"location":"api/optim/SGD/#parameters","title":"Parameters","text":"
    • lr

      Default \u2192 0.01

    "},{"location":"api/optim/SGD/#attributes","title":"Attributes","text":"
    • learning_rate
    "},{"location":"api/optim/SGD/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\n\ndataset = datasets.Phishing()\noptimizer = optim.SGD(0.1)\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(optimizer)\n)\nmetric = metrics.F1()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    F1: 87.85%\n

    "},{"location":"api/optim/SGD/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.

    1. Robbins, H. and Monro, S., 1951. A stochastic approximation method. The annals of mathematical statistics, pp.400-407 \u21a9

    "},{"location":"api/optim/base/Initializer/","title":"Initializer","text":"

    An initializer is used to set initial weights in a model.

    "},{"location":"api/optim/base/Initializer/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/base/Loss/","title":"Loss","text":"

    Base class for all loss functions.

    "},{"location":"api/optim/base/Loss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).
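
    For instance, assuming the logistic loss implements mean_func as the sigmoid (the inverse of the logit link), a raw logit can be mapped back to a probability; the printed value rests on that assumption:

    from river import optim\n\nloss = optim.losses.Log()\nloss.mean_func(0.0)  # sigmoid(0) = 0.5 under this assumption\n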

    "},{"location":"api/optim/base/Optimizer/","title":"Optimizer","text":"

    Optimizer interface.

    Every optimizer inherits from this base interface.

    "},{"location":"api/optim/base/Optimizer/#parameters","title":"Parameters","text":"
    • lr

      Type \u2192 int | float | Scheduler

    "},{"location":"api/optim/base/Optimizer/#attributes","title":"Attributes","text":"
    • learning_rate (float)

      Returns the current learning rate value.

    "},{"location":"api/optim/base/Optimizer/#methods","title":"Methods","text":"look_ahead

    Updates a weight vector before a prediction is made.

    The weights w are modified in-place and the updated weights are returned.

    Parameters

    • w \u2014 'dict'

    step

    Updates a weight vector given a gradient.

    Parameters

    • w \u2014 'dict | VectorLike'
    • g \u2014 'dict | VectorLike'

    Returns

    dict | VectorLike: The updated weights.
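
    The step interface can also be exercised directly. A minimal sketch with plain SGD, where each entry is updated as w - lr * g:

    from river import optim\n\noptimizer = optim.SGD(lr=0.1)\nw = {'a': 1.0, 'b': -2.0}\ng = {'a': 0.5, 'b': -0.5}\noptimizer.step(w, g)\n
    {'a': 0.95, 'b': -1.95}\n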

    "},{"location":"api/optim/base/Scheduler/","title":"Scheduler","text":"

    Can be used to program the learning rate schedule of an optim.base.Optimizer.

    "},{"location":"api/optim/base/Scheduler/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'
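
    Because an optimizer's lr accepts a Scheduler, a custom schedule can be programmed by subclassing optim.base.Scheduler and implementing get. A hypothetical step-decay sketch (Halving and its parameters are illustrative, not part of the library):

    from river import optim\n\nclass Halving(optim.base.Scheduler):\n    def __init__(self, lr, every):\n        self.lr = lr\n        self.every = every\n\n    def get(self, t):\n        # halve the learning rate every `every` iterations\n        return self.lr * 0.5 ** (t // self.every)\n\noptimizer = optim.SGD(lr=Halving(0.1, every=100))\n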

    "},{"location":"api/optim/initializers/Constant/","title":"Constant","text":"

    Constant initializer which always returns the same value.

    "},{"location":"api/optim/initializers/Constant/#parameters","title":"Parameters","text":"
    • value

      Type \u2192 float

    "},{"location":"api/optim/initializers/Constant/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Constant(value=3.14)\n\ninit(shape=1)\n
    3.14\n

    init(shape=2)\n
    array([3.14, 3.14])\n

    "},{"location":"api/optim/initializers/Constant/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/initializers/Normal/","title":"Normal","text":"

    Random normal initializer which samples from a normal distribution with the specified parameters.

    "},{"location":"api/optim/initializers/Normal/#parameters","title":"Parameters","text":"
    • mu

      Default \u2192 0.0

      The mean of the normal distribution

    • sigma

      Default \u2192 1.0

      The standard deviation of the normal distribution

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed that can be set for reproducibility.

    "},{"location":"api/optim/initializers/Normal/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Normal(mu=0, sigma=1, seed=42)\n\ninit(shape=1)\n
    0.496714\n

    init(shape=2)\n
    array([-0.1382643 ,  0.64768854])\n

    "},{"location":"api/optim/initializers/Normal/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/initializers/Zeros/","title":"Zeros","text":"

    Constant initializer which always returns zeros.

    "},{"location":"api/optim/initializers/Zeros/#examples","title":"Examples","text":"

    from river import optim\n\ninit = optim.initializers.Zeros()\n\ninit(shape=1)\n
    0.0\n

    init(shape=2)\n
    array([0., 0.])\n

    "},{"location":"api/optim/initializers/Zeros/#methods","title":"Methods","text":"call

    Returns a fresh set of weights.

    Parameters

    • shape \u2014 defaults to 1

    "},{"location":"api/optim/losses/Absolute/","title":"Absolute","text":"

    Absolute loss, also known as the mean absolute error or L1 loss.

    Mathematically, it is defined as

    \\[L = |p_i - y_i|\\]

    Its gradient w.r.t. \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = sgn(p_i - y_i)\\]"},{"location":"api/optim/losses/Absolute/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Absolute()\nloss(-42, 42)\n
    84\n
    loss.gradient(1, 2)\n
    1\n
    loss.gradient(2, 1)\n
    -1\n

    "},{"location":"api/optim/losses/Absolute/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/BinaryFocalLoss/","title":"BinaryFocalLoss","text":"

    Binary focal loss.

    This implements the \"star\" algorithm from the appendix of the focal loss paper.

    "},{"location":"api/optim/losses/BinaryFocalLoss/#parameters","title":"Parameters","text":"
    • gamma

      Default \u2192 2

    • beta

      Default \u2192 1
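
    This loss has no example section; as a minimal usage sketch, it follows the same call convention documented for the other losses, with a binary y_true and a raw y_pred (the concrete values returned depend on the focal formula and are not shown):

    from river import optim\n\nloss = optim.losses.BinaryFocalLoss(gamma=2, beta=1)\nvalue = loss(True, 0.0)\ngrad = loss.gradient(True, 0.0)\n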

    "},{"location":"api/optim/losses/BinaryFocalLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Lin, T.Y., Goyal, P., Girshick, R., He, K. and Doll\u00e1r, P., 2017. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision (pp. 2980-2988)

    "},{"location":"api/optim/losses/BinaryLoss/","title":"BinaryLoss","text":"

    A loss appropriate for binary classification tasks.

    "},{"location":"api/optim/losses/BinaryLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Cauchy/","title":"Cauchy","text":"

    Cauchy loss function.

    "},{"location":"api/optim/losses/Cauchy/#parameters","title":"Parameters","text":"
    • C

      Default \u2192 80

    "},{"location":"api/optim/losses/Cauchy/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. \"Effect of MAE\" Kaggle discussion \u21a9

    2. Paris Madness Kaggle kernel \u21a9

    "},{"location":"api/optim/losses/CrossEntropy/","title":"CrossEntropy","text":"

    Cross entropy loss.

    This is a generalization of logistic loss to multiple classes.

    "},{"location":"api/optim/losses/CrossEntropy/#parameters","title":"Parameters","text":"
    • class_weight

      Type \u2192 dict[base.typing.ClfTarget, float] | None

      Default \u2192 None

      A dictionary that indicates what weight to associate with each class.

    "},{"location":"api/optim/losses/CrossEntropy/#examples","title":"Examples","text":"

    from river import optim\n\ny_true = [0, 1, 2, 2]\ny_pred = [\n    {0: 0.29450637, 1: 0.34216758, 2: 0.36332605},\n    {0: 0.21290077, 1: 0.32728332, 2: 0.45981591},\n    {0: 0.42860913, 1: 0.33380113, 2: 0.23758974},\n    {0: 0.44941979, 1: 0.32962558, 2: 0.22095463}\n]\n\nloss = optim.losses.CrossEntropy()\n\nfor yt, yp in zip(y_true, y_pred):\n    print(loss(yt, yp))\n
    1.222454\n1.116929\n1.437209\n1.509797\n

    for yt, yp in zip(y_true, y_pred):\n    print(loss.gradient(yt, yp))\n
    {0: -0.70549363, 1: 0.34216758, 2: 0.36332605}\n{0: 0.21290077, 1: -0.67271668, 2: 0.45981591}\n{0: 0.42860913, 1: 0.33380113, 2: -0.76241026}\n{0: 0.44941979, 1: 0.32962558, 2: -0.77904537}\n

    "},{"location":"api/optim/losses/CrossEntropy/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. What is Softmax regression and how is it related to Logistic regression? \u21a9

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/","title":"EpsilonInsensitiveHinge","text":"

    Epsilon-insensitive hinge loss.

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/#parameters","title":"Parameters","text":"
    • eps

      Default \u2192 0.1

    "},{"location":"api/optim/losses/EpsilonInsensitiveHinge/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Hinge/","title":"Hinge","text":"

    Computes the hinge loss.

    Mathematically, it is defined as

    \\[L = max(0, 1 - p_i * y_i)\\]

    Its gradient w.r.t. \(p_i\) is

    \[\frac{\partial L}{\partial p_i} = \left\{ \begin{array}{ll} 0 & p_i y_i \geqslant 1 \\ -y_i & p_i y_i < 1 \end{array} \right.\]"},{"location":"api/optim/losses/Hinge/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 1.0

      Margin threshold. 1 yields the loss used in SVMs, whilst 0 is equivalent to the loss used in the Perceptron algorithm.

    "},{"location":"api/optim/losses/Hinge/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Hinge(threshold=1)\nloss(1, .2)\n
    0.8\n

    loss.gradient(1, .2)\n
    -1\n

    "},{"location":"api/optim/losses/Hinge/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Huber/","title":"Huber","text":"

    Huber loss.

    Variant of the squared loss that is robust to outliers.

    "},{"location":"api/optim/losses/Huber/#parameters","title":"Parameters","text":"
    • epsilon

      Default \u2192 0.1

    "},{"location":"api/optim/losses/Huber/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Huber loss function - Wikipedia

    "},{"location":"api/optim/losses/Log/","title":"Log","text":"

    Logarithmic loss.

    This loss function expects each provided y_pred to be a logit. In other words, it must be the raw output of a linear model or a neural network.

    "},{"location":"api/optim/losses/Log/#parameters","title":"Parameters","text":"
    • weight_pos

      Default \u2192 1.0

    • weight_neg

      Default \u2192 1.0

    "},{"location":"api/optim/losses/Log/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Logit Wikipedia page \u21a9

    "},{"location":"api/optim/losses/MultiClassLoss/","title":"MultiClassLoss","text":"

    A loss appropriate for multi-class classification tasks.

    "},{"location":"api/optim/losses/MultiClassLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Poisson/","title":"Poisson","text":"

    Poisson loss.

    The Poisson loss is usually more suited for regression with count data than the squared loss.

    Mathematically, it is defined as

    \\[L = exp(p_i) - y_i \\times p_i\\]

    Its gradient w.r.t. \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = exp(p_i) - y_i\\]"},{"location":"api/optim/losses/Poisson/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Quantile/","title":"Quantile","text":"

    Quantile loss.

    "},{"location":"api/optim/losses/Quantile/#parameters","title":"Parameters","text":"
    • alpha

      Default \u2192 0.5

      Desired quantile to attain.

    "},{"location":"api/optim/losses/Quantile/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Quantile(0.5)\nloss(1, 3)\n
    1.0\n

    loss.gradient(1, 3)\n
    0.5\n

    loss.gradient(3, 1)\n
    -0.5\n

    "},{"location":"api/optim/losses/Quantile/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    1. Wikipedia article on quantile regression \u21a9

    2. Derivative from WolframAlpha \u21a9

    "},{"location":"api/optim/losses/RegressionLoss/","title":"RegressionLoss","text":"

    A loss appropriate for regression tasks.

    "},{"location":"api/optim/losses/RegressionLoss/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/losses/Squared/","title":"Squared","text":"

    Squared loss, also known as the L2 loss.

    Mathematically, it is defined as

    \\[L = (p_i - y_i) ^ 2\\]

    Its gradient w.r.t. \(p_i\) is

    \\[\\frac{\\partial L}{\\partial p_i} = 2 (p_i - y_i)\\]

    One thing to note is that this convention is consistent with Vowpal Wabbit and PyTorch, but not with scikit-learn. Indeed, scikit-learn divides the loss by 2, making the 2 disappear in the gradient.

    "},{"location":"api/optim/losses/Squared/#examples","title":"Examples","text":"

    from river import optim\n\nloss = optim.losses.Squared()\nloss(-4, 5)\n
    81\n
    loss.gradient(-4, 5)\n
    18\n
    loss.gradient(5, -4)\n
    -18\n

    "},{"location":"api/optim/losses/Squared/#methods","title":"Methods","text":"call

    Returns the loss.

    Parameters

    • y_true
    • y_pred

    Returns

    The loss(es).

    gradient

    Return the gradient with respect to y_pred.

    Parameters

    • y_true
    • y_pred

    Returns

    The gradient(s).

    mean_func

    Mean function.

    This is the inverse of the link function. Typically, a loss function takes as input the raw output of a model. In the case of classification, the raw output would be logits. The mean function can be used to convert the raw output into a value that makes sense to the user, such as a probability.

    Parameters

    • y_pred

    Returns

    The adjusted prediction(s).

    "},{"location":"api/optim/schedulers/Constant/","title":"Constant","text":"

    Always uses the same learning rate.

    "},{"location":"api/optim/schedulers/Constant/#parameters","title":"Parameters","text":"
    • learning_rate

      Type \u2192 int | float

    "},{"location":"api/optim/schedulers/Constant/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    "},{"location":"api/optim/schedulers/InverseScaling/","title":"InverseScaling","text":"

    Reduces the learning rate using a power schedule.

    Assuming an initial learning rate \\(\\eta\\), the learning rate at step \\(t\\) is:

    \[\frac{\eta}{(t + 1)^p}\]

    where \\(p\\) is a user-defined parameter.

    "},{"location":"api/optim/schedulers/InverseScaling/#parameters","title":"Parameters","text":"
    • learning_rate

      Type \u2192 float

    • power

      Default \u2192 0.5

    "},{"location":"api/optim/schedulers/InverseScaling/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    "},{"location":"api/optim/schedulers/Optimal/","title":"Optimal","text":"

    Optimal learning schedule as proposed by L\u00e9on Bottou.

    "},{"location":"api/optim/schedulers/Optimal/#parameters","title":"Parameters","text":"
    • loss

      Type \u2192 optim.losses.Loss

    • alpha

      Default \u2192 0.0001
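
As a usage sketch (our assumption, not from the original reference), this scheduler is typically passed as the lr of an optimizer:

from river import linear_model\nfrom river import optim\n\nscheduler = optim.schedulers.Optimal(optim.losses.Log(), alpha=1e-4)\nmodel = linear_model.LogisticRegression(\n    optimizer=optim.SGD(lr=scheduler),\n    loss=optim.losses.Log()\n)\n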

    "},{"location":"api/optim/schedulers/Optimal/#methods","title":"Methods","text":"get

    Returns the learning rate at a given iteration.

    Parameters

    • t \u2014 'int'

    1. Bottou, L., 2012. Stochastic gradient descent tricks. In Neural networks: Tricks of the trade (pp. 421-436). Springer, Berlin, Heidelberg. \u21a9

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/","title":"AdaptiveStandardScaler","text":"

    Scales data using exponentially weighted moving average and variance.

Under the hood, an exponentially weighted running mean and variance are maintained for each feature. This can potentially provide better results for drifting data in comparison to preprocessing.StandardScaler. Indeed, the latter computes a global mean and variance for each feature, whereas this scaler weights data in proportion to their recency.

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.3

      This parameter is passed to stats.EWVar. It is expected to be in [0, 1]. More weight is assigned to recent samples the closer fading_factor is to 1.

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#examples","title":"Examples","text":"

    Consider the following series which contains a positive trend.

    import random\n\nrandom.seed(42)\nX = [\n    {'x': random.uniform(4 + i, 6 + i)}\n    for i in range(8)\n]\nfor x in X:\n    print(x)\n
    {'x': 5.278}\n{'x': 5.050}\n{'x': 6.550}\n{'x': 7.446}\n{'x': 9.472}\n{'x': 10.353}\n{'x': 11.784}\n{'x': 11.173}\n

    This scaler works well with this kind of data because it uses statistics that assign higher weight to more recent data.

    from river import preprocessing\n\nscaler = preprocessing.AdaptiveStandardScaler(fading_factor=.6)\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0}\n{'x': -0.816}\n{'x': 0.812}\n{'x': 0.695}\n{'x': 0.754}\n{'x': 0.598}\n{'x': 0.651}\n{'x': 0.124}\n

    "},{"location":"api/preprocessing/AdaptiveStandardScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/Binarizer/","title":"Binarizer","text":"

    Binarizes the data to 0 or 1 according to a threshold.

    "},{"location":"api/preprocessing/Binarizer/#parameters","title":"Parameters","text":"
    • threshold

      Default \u2192 0.0

      Values above this are replaced by 1 and the others by 0.

    • dtype

      Default \u2192 <class 'bool'>

      The desired data type to apply.

    "},{"location":"api/preprocessing/Binarizer/#examples","title":"Examples","text":"

    import river\nimport numpy as np\n\nrng = np.random.RandomState(42)\nX = [{'x1': v, 'x2': int(v)} for v in rng.uniform(low=-4, high=4, size=6)]\n\nbinarizer = river.preprocessing.Binarizer()\nfor x in X:\n    print(binarizer.learn_one(x).transform_one(x))\n
    {'x1': False, 'x2': False}\n{'x1': True, 'x2': True}\n{'x1': True, 'x2': True}\n{'x1': True, 'x2': False}\n{'x1': False, 'x2': False}\n{'x1': False, 'x2': False}\n

    "},{"location":"api/preprocessing/Binarizer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/FeatureHasher/","title":"FeatureHasher","text":"

    Implements the hashing trick.

Each pair of (name, value) features is hashed into a random integer. A modulo operator is then used to make sure the hash is in a certain range. We use the MurmurHash implementation from scikit-learn.

    "},{"location":"api/preprocessing/FeatureHasher/#parameters","title":"Parameters","text":"
    • n_features

      Default \u2192 1048576

The number of output features; each hash value is taken modulo this number.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Set the seed to produce identical results.

    "},{"location":"api/preprocessing/FeatureHasher/#examples","title":"Examples","text":"

    import river\n\nhasher = river.preprocessing.FeatureHasher(n_features=10, seed=42)\n\nX = [\n    {'dog': 1, 'cat': 2, 'elephant': 4},\n    {'dog': 2, 'run': 5}\n]\nfor x in X:\n    print(hasher.transform_one(x))\n
    Counter({1: 4, 9: 2, 8: 1})\nCounter({4: 5, 8: 2})\n

    "},{"location":"api/preprocessing/FeatureHasher/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Wikipedia article on feature vectorization using the hashing trick \u21a9

    "},{"location":"api/preprocessing/GaussianRandomProjector/","title":"GaussianRandomProjector","text":"

    Gaussian random projector.

    This transformer reduces the dimensionality of inputs through Gaussian random projection.

The components of the random projection matrix are drawn from N(0, 1 / n_components).

    "},{"location":"api/preprocessing/GaussianRandomProjector/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

      Number of components to project the data onto.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/preprocessing/GaussianRandomProjector/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = preprocessing.GaussianRandomProjector(\n    n_components=3,\n    seed=42\n)\n\nfor x, y in dataset:\n    x = model.transform_one(x)\n    print(x)\n    break\n
    {0: -61289.37139206629, 1: 141312.51039283074, 2: 279165.99370457436}\n

    model = (\n    preprocessing.GaussianRandomProjector(\n        n_components=5,\n        seed=42\n    ) |\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression()\n)\nevaluate.progressive_val_score(dataset, model, metrics.MAE())\n
    MAE: 0.933502\n

    "},{"location":"api/preprocessing/GaussianRandomProjector/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Gaussian random projection \u21a9

    2. scikit-learn random projections module \u21a9

    "},{"location":"api/preprocessing/LDA/","title":"LDA","text":"

    Online Latent Dirichlet Allocation with Infinite Vocabulary.

Latent Dirichlet allocation (LDA) is a probabilistic approach for exploring topics in document collections. The key advantage of this variant is that it assumes an infinite vocabulary, meaning that the set of tokens does not have to be known in advance, as opposed to the implementation from sklearn. The results produced by this implementation are identical to those from the original implementation proposed by the method's authors.

    This class takes as input token counts. Therefore, it requires you to tokenize beforehand. You can do so by using a feature_extraction.BagOfWords instance, as shown in the example below.

    "},{"location":"api/preprocessing/LDA/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

Number of topics of the latent Dirichlet allocation.

    • number_of_documents

      Default \u2192 1000000.0

      Estimated number of documents.

    • alpha_theta

      Default \u2192 0.5

      Hyper-parameter of the Dirichlet distribution of topics.

    • alpha_beta

      Default \u2192 100.0

      Hyper-parameter of the Dirichlet process of distribution over words.

    • tau

      Default \u2192 64.0

      Learning inertia to prevent premature convergence.

    • kappa

      Default \u2192 0.75

The learning rate kappa controls how quickly new parameter estimates replace the old ones. kappa \u2208 (0.5, 1] is required for convergence.

    • vocab_prune_interval

      Default \u2192 10

Interval at which to refresh the word topic distributions.

    • number_of_samples

      Default \u2192 10

Number of iterations used to compute the document topic distribution.

    • ranking_smooth_factor

      Default \u2192 1e-12

    • burn_in_sweeps

      Default \u2192 5

Number of iterations required while analyzing a document before updating its topic distribution.

    • maximum_size_vocabulary

      Default \u2192 4000

      Maximum size of the stored vocabulary.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number seed used for reproducibility.

    "},{"location":"api/preprocessing/LDA/#attributes","title":"Attributes","text":"
    • counter (int)

      The current number of observed documents.

    • truncation_size_prime (int)

Number of distinct words stored in the vocabulary. Updated before processing a document.

    • truncation_size (int)

Number of distinct words stored in the vocabulary. Updated after processing a document.

    • word_to_index (dict)

      Words as keys and indexes as values.

    • index_to_word (dict)

      Indexes as keys and words as values.

    • nu_1 (dict)

      Weights of the words. Component of the variational inference.

    • nu_2 (dict)

      Weights of the words. Component of the variational inference.

    "},{"location":"api/preprocessing/LDA/#examples","title":"Examples","text":"

    from river import compose\nfrom river import feature_extraction\nfrom river import preprocessing\n\nX = [\n   'weather cold',\n   'weather hot dry',\n   'weather cold rainy',\n   'weather hot',\n   'weather cold humid',\n]\n\nlda = compose.Pipeline(\n    feature_extraction.BagOfWords(),\n    preprocessing.LDA(\n        n_components=2,\n        number_of_documents=60,\n        seed=42\n    )\n)\n\nfor x in X:\n    lda = lda.learn_one(x)\n    topics = lda.transform_one(x)\n    print(topics)\n
    {0: 0.5, 1: 2.5}\n{0: 2.499..., 1: 1.5}\n{0: 0.5, 1: 3.5}\n{0: 0.5, 1: 2.5}\n{0: 1.5, 1: 2.5}\n

    "},{"location":"api/preprocessing/LDA/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    learn_transform_one

Equivalent to lda.learn_one(x).transform_one(x), but faster.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: Component attributions for the input document.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. Zhai, K. and Boyd-Graber, J., 2013, February. Online latent Dirichlet allocation with infinite vocabulary. In International Conference on Machine Learning (pp. 561-569). \u21a9

    2. PyInfVoc on GitHub \u21a9

    "},{"location":"api/preprocessing/MaxAbsScaler/","title":"MaxAbsScaler","text":"

    Scales the data to a [-1, 1] range based on absolute maximum.

Under the hood, a running absolute max is maintained. This scaler is meant for data that is already centered at zero or sparse data. It does not shift/center the data, and thus does not destroy any sparsity.

    "},{"location":"api/preprocessing/MaxAbsScaler/#attributes","title":"Attributes","text":"
    • abs_max (dict)

      Mapping between features and instances of stats.AbsMax.

    "},{"location":"api/preprocessing/MaxAbsScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\nfor x in X:\n    print(x)\n
    {'x': 10.557707}\n{'x': 8.100043}\n{'x': 9.100117}\n{'x': 8.892842}\n{'x': 10.945884}\n

    scaler = preprocessing.MaxAbsScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 1.0}\n{'x': 0.767216}\n{'x': 0.861940}\n{'x': 0.842308}\n{'x': 1.0}\n

    "},{"location":"api/preprocessing/MaxAbsScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/MinMaxScaler/","title":"MinMaxScaler","text":"

    Scales the data to a fixed range from 0 to 1.

Under the hood, a running min and a running peak-to-peak (max - min) are maintained.

    "},{"location":"api/preprocessing/MinMaxScaler/#attributes","title":"Attributes","text":"
    • min (dict)

      Mapping between features and instances of stats.Min.

    • max (dict)

      Mapping between features and instances of stats.Max.

    "},{"location":"api/preprocessing/MinMaxScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\nfor x in X:\n    print(x)\n
    {'x': 10.557707}\n{'x': 8.100043}\n{'x': 9.100117}\n{'x': 8.892842}\n{'x': 10.945884}\n

    scaler = preprocessing.MinMaxScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0}\n{'x': 0.0}\n{'x': 0.406920}\n{'x': 0.322582}\n{'x': 1.0}\n

    "},{"location":"api/preprocessing/MinMaxScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/Normalizer/","title":"Normalizer","text":"

    Scales a set of features so that it has unit norm.

    This is particularly useful when used after a feature_extraction.TFIDF.

    "},{"location":"api/preprocessing/Normalizer/#parameters","title":"Parameters","text":"
    • order

      Default \u2192 2

      Order of the norm (e.g. 2 corresponds to the \\(L^2\\) norm).

    "},{"location":"api/preprocessing/Normalizer/#examples","title":"Examples","text":"

    from river import preprocessing\nfrom river import stream\n\nscaler = preprocessing.Normalizer(order=2)\n\nX = [[4, 1, 2, 2],\n     [1, 3, 9, 3],\n     [5, 7, 5, 1]]\n\nfor x, _ in stream.iter_array(X):\n    print(scaler.transform_one(x))\n
    {0: 0.8, 1: 0.2, 2: 0.4, 3: 0.4}\n{0: 0.1, 1: 0.3, 2: 0.9, 3: 0.3}\n{0: 0.5, 1: 0.7, 2: 0.5, 3: 0.1}\n

    "},{"location":"api/preprocessing/Normalizer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/OneHotEncoder/","title":"OneHotEncoder","text":"

    One-hot encoding.

    This transformer will encode every feature it is provided with. If a list or set is provided, this transformer will encode every entry in the list/set. You can apply it to a subset of features by composing it with compose.Select or compose.SelectType.

    "},{"location":"api/preprocessing/OneHotEncoder/#parameters","title":"Parameters","text":"
    • drop_zeros

      Default \u2192 False

Whether or not 0s should be made explicit.

    • drop_first

      Default \u2192 False

      Whether to get k - 1 dummies out of k categorical levels by removing the first key. This is useful in some statistical models where perfectly collinear features cause problems.

    "},{"location":"api/preprocessing/OneHotEncoder/#examples","title":"Examples","text":"

    Let us first create an example dataset.

    from pprint import pprint\nimport random\nimport string\n\nrandom.seed(42)\nalphabet = list(string.ascii_lowercase)\nX = [\n    {\n        'c1': random.choice(alphabet),\n        'c2': random.choice(alphabet),\n    }\n    for _ in range(4)\n]\npprint(X)\n
    [{'c1': 'u', 'c2': 'd'},\n    {'c1': 'a', 'c2': 'x'},\n    {'c1': 'i', 'c2': 'h'},\n    {'c1': 'h', 'c2': 'e'}]\n

We can now apply one-hot encoding. All the provided features are one-hot encoded, so there is no need to specify which features to encode.

    from river import preprocessing\n\noh = preprocessing.OneHotEncoder()\nfor x in X[:2]:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c1_u': 0, 'c2_d': 0, 'c2_x': 1}\n

The drop_zeros parameter can be set to True if you don't want previously seen features to appear in the output with a value of 0. Otherwise, all the past features will be included in the output.

    oh = preprocessing.OneHotEncoder(drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c2_x': 1}\n{'c1_i': 1, 'c2_h': 1}\n{'c1_h': 1, 'c2_e': 1}\n

    You can encode only k - 1 features out of k by setting drop_first to True.

    oh = preprocessing.OneHotEncoder(drop_first=True, drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c2_d': 1}\n{'c2_x': 1}\n{'c2_h': 1}\n{'c2_e': 1}\n

    A subset of the features can be one-hot encoded by piping a compose.Select into the OneHotEncoder.

    from river import compose\n\npp = compose.Select('c1') | preprocessing.OneHotEncoder()\n\nfor x in X:\n    pp = pp.learn_one(x)\n    pprint(pp.transform_one(x))\n
    {'c1_u': 1}\n{'c1_a': 1, 'c1_u': 0}\n{'c1_a': 0, 'c1_i': 1, 'c1_u': 0}\n{'c1_a': 0, 'c1_h': 1, 'c1_i': 0, 'c1_u': 0}\n

    You can preserve the c2 feature by using a union:

    pp = compose.Select('c1') | preprocessing.OneHotEncoder()\npp += compose.Select('c2')\n\nfor x in X:\n    pp = pp.learn_one(x)\n    pprint(pp.transform_one(x))\n
    {'c1_u': 1, 'c2': 'd'}\n{'c1_a': 1, 'c1_u': 0, 'c2': 'x'}\n{'c1_a': 0, 'c1_i': 1, 'c1_u': 0, 'c2': 'h'}\n{'c1_a': 0, 'c1_h': 1, 'c1_i': 0, 'c1_u': 0, 'c2': 'e'}\n

    Similar to the above examples, we can also pass values as a list. This will one-hot encode all of the entries individually.

    X = [{'c1': ['u', 'a'], 'c2': ['d']},\n    {'c1': ['a', 'b'], 'c2': ['x']},\n    {'c1': ['i'], 'c2': ['h', 'z']},\n    {'c1': ['h', 'b'], 'c2': ['e']}]\n\noh = preprocessing.OneHotEncoder(drop_zeros=True)\nfor x in X:\n    oh = oh.learn_one(x)\n    pprint(oh.transform_one(x))\n
    {'c1_a': 1, 'c1_u': 1, 'c2_d': 1}\n{'c1_a': 1, 'c1_b': 1, 'c2_x': 1}\n{'c1_i': 1, 'c2_h': 1, 'c2_z': 1}\n{'c1_b': 1, 'c1_h': 1, 'c2_e': 1}\n

    Processing mini-batches is also possible.

from pprint import pprint\nimport random\nimport string\n\nimport pandas as pd\n\nrandom.seed(42)\nalphabet = list(string.ascii_lowercase)\nX = pd.DataFrame(\n    {\n        'c1': random.choice(alphabet),\n        'c2': random.choice(alphabet),\n    }\n    for _ in range(3)\n)\nX\n
      c1 c2\n0  u  d\n1  a  x\n2  i  h\n

    oh = preprocessing.OneHotEncoder(drop_zeros=True)\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_a  c1_i  c1_u  c2_d  c2_h  c2_x\n0     0     0     1     1     0     0\n1     1     0     0     0     0     1\n2     0     1     0     0     1     0\n

    oh = preprocessing.OneHotEncoder(drop_zeros=True, drop_first=True)\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_i  c1_u  c2_d  c2_h  c2_x\n0     0     1     1     0     0\n1     0     0     0     0     1\n2     1     0     0     1     0\n

    Here's an example where the zeros are kept:

    oh = preprocessing.OneHotEncoder(drop_zeros=False)\nX_init = pd.DataFrame([{\"c1\": \"Oranges\", \"c2\": \"Apples\"}])\noh = oh.learn_many(X_init)\noh = oh.learn_many(X)\n\ndf = oh.transform_many(X)\ndf.sort_index(axis=\"columns\")\n
       c1_Oranges  c1_a  c1_i  c1_u  c2_Apples  c2_d  c2_h  c2_x\n0           0     0     0     1          0     1     0     0\n1           0     1     0     0          0     0     0     1\n2           0     0     1     0          0     0     1     0\n

    df.dtypes.sort_index()\n
    c1_Oranges    Sparse[uint8, 0]\nc1_a          Sparse[uint8, 0]\nc1_i          Sparse[uint8, 0]\nc1_u          Sparse[uint8, 0]\nc2_Apples     Sparse[uint8, 0]\nc2_d          Sparse[uint8, 0]\nc2_h          Sparse[uint8, 0]\nc2_x          Sparse[uint8, 0]\ndtype: object\n

    "},{"location":"api/preprocessing/OneHotEncoder/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_many step can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'
    • y \u2014 defaults to None

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/OrdinalEncoder/","title":"OrdinalEncoder","text":"

    Ordinal encoder.

This transformer maps each feature to integers. It can be useful when a feature has string values (i.e. categorical variables).

    "},{"location":"api/preprocessing/OrdinalEncoder/#parameters","title":"Parameters","text":"
    • unknown_value

      Type \u2192 int | None

      Default \u2192 0

The value to use for unknown categories seen during transform_one. Unknown categories will be mapped to an integer once they are seen during learn_one. This value can be set to None in order to map categories that have never been seen before to None.

    • none_value

      Type \u2192 int

      Default \u2192 -1

      The value to encode None with.

    "},{"location":"api/preprocessing/OrdinalEncoder/#attributes","title":"Attributes","text":"
    • categories

      A dict of dicts. The outer dict maps each feature to its inner dict. The inner dict maps each category to its code.

    "},{"location":"api/preprocessing/OrdinalEncoder/#examples","title":"Examples","text":"

    from river import preprocessing\n\nX = [\n    {\"country\": \"France\", \"place\": \"Taco Bell\"},\n    {\"country\": None, \"place\": None},\n    {\"country\": \"Sweden\", \"place\": \"Burger King\"},\n    {\"country\": \"France\", \"place\": \"Burger King\"},\n    {\"country\": \"Russia\", \"place\": \"Starbucks\"},\n    {\"country\": \"Russia\", \"place\": \"Starbucks\"},\n    {\"country\": \"Sweden\", \"place\": \"Taco Bell\"},\n    {\"country\": None, \"place\": None},\n]\n\nencoder = preprocessing.OrdinalEncoder()\nfor x in X:\n    print(encoder.transform_one(x))\n    encoder = encoder.learn_one(x)\n
    {'country': 0, 'place': 0}\n{'country': -1, 'place': -1}\n{'country': 0, 'place': 0}\n{'country': 1, 'place': 2}\n{'country': 0, 'place': 0}\n{'country': 3, 'place': 3}\n{'country': 2, 'place': 1}\n{'country': -1, 'place': -1}\n

import pandas as pd\n\nxb1 = pd.DataFrame(X[0:4], index=[0, 1, 2, 3])\nxb2 = pd.DataFrame(X[4:8], index=[4, 5, 6, 7])\n\nencoder = preprocessing.OrdinalEncoder()\nencoder.transform_many(xb1)\n
       country  place\n0        0      0\n1       -1     -1\n2        0      0\n3        0      0\n

    encoder = encoder.learn_many(xb1)\nencoder.transform_many(xb2)\n
       country  place\n4        0      0\n5        0      0\n6        2      1\n7       -1     -1\n

    "},{"location":"api/preprocessing/OrdinalEncoder/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

A lot of transformers don't actually have to do anything during the learn_many step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_many step can override this method.

    Parameters

    • X \u2014 'pd.DataFrame'
    • y \u2014 defaults to None

    Returns

    Transformer: self

    learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Transform a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    Returns

    pd.DataFrame: A new DataFrame.

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/PredClipper/","title":"PredClipper","text":"

    Clips the target after predicting.

    "},{"location":"api/preprocessing/PredClipper/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regressor model for which to clip the predictions.

    • y_min

      Type \u2192 float

Minimum value to clip predictions at.

    • y_max

      Type \u2192 float

Maximum value to clip predictions at.

    "},{"location":"api/preprocessing/PredClipper/#examples","title":"Examples","text":"

    from river import linear_model\nfrom river import preprocessing\n\ndataset = (\n    ({'a': 2, 'b': 4}, 80),\n    ({'a': 3, 'b': 5}, 100),\n    ({'a': 4, 'b': 6}, 120)\n)\n\nmodel = preprocessing.PredClipper(\n    regressor=linear_model.LinearRegression(),\n    y_min=0,\n    y_max=200\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(x, y)\n\nmodel.predict_one({'a': -100, 'b': -200})\n
    0\n

    model.predict_one({'a': 50, 'b': 60})\n
    200\n

    "},{"location":"api/preprocessing/PredClipper/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y
    • kwargs

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x
    • kwargs

    Returns

    The prediction.

    "},{"location":"api/preprocessing/PreviousImputer/","title":"PreviousImputer","text":"

    Imputes missing values by using the most recent value.

    "},{"location":"api/preprocessing/PreviousImputer/#examples","title":"Examples","text":"

    from river import preprocessing\n\nimputer = preprocessing.PreviousImputer()\n\nimputer = imputer.learn_one({'x': 1, 'y': 2})\nimputer.transform_one({'y': None})\n
    {'y': 2}\n

    imputer.transform_one({'x': None})\n
    {'x': 1}\n

    "},{"location":"api/preprocessing/PreviousImputer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/RobustScaler/","title":"RobustScaler","text":"

    Scale features using statistics that are robust to outliers.

This scaler removes the median and scales the data according to the interquantile range.

    "},{"location":"api/preprocessing/RobustScaler/#parameters","title":"Parameters","text":"
    • with_centering

      Default \u2192 True

      Whether to centre the data before scaling.

    • with_scaling

      Default \u2192 True

      Whether to scale data to IQR.

    • q_inf

      Default \u2192 0.25

      Desired inferior quantile, must be between 0 and 1.

    • q_sup

      Default \u2192 0.75

      Desired superior quantile, must be between 0 and 1.

    "},{"location":"api/preprocessing/RobustScaler/#attributes","title":"Attributes","text":"
    • median (dict)

Mapping between features and instances of stats.Quantile(0.5).

    • iqr (dict)

      Mapping between features and instances of stats.IQR.

    "},{"location":"api/preprocessing/RobustScaler/#examples","title":"Examples","text":"

    from pprint import pprint\nimport random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12)} for _ in range(5)]\npprint(X)\n
    [{'x': 10.557707},\n    {'x': 8.100043},\n    {'x': 9.100117},\n    {'x': 8.892842},\n    {'x': 10.945884}]\n

    scaler = preprocessing.RobustScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
{'x': 0.0}\n{'x': -1.0}\n{'x': 0.0}\n{'x': -0.12449923287875722}\n{'x': 1.1086595155704708}\n

    "},{"location":"api/preprocessing/RobustScaler/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/SparseRandomProjector/","title":"SparseRandomProjector","text":"

    Sparse random projector.

    This transformer reduces the dimensionality of inputs by projecting them onto a sparse random projection matrix.

    Ping Li et al. recommend using a minimum density of 1 / sqrt(n_features). The transformer is not aware of how many features will be seen, so the user must specify the density manually.

    "},{"location":"api/preprocessing/SparseRandomProjector/#parameters","title":"Parameters","text":"
    • n_components

      Default \u2192 10

      Number of components to project the data onto.

    • density

      Default \u2192 0.1

      Density of the random projection matrix. The density is defined as the ratio of non-zero components in the matrix. It is equal to 1 - sparsity.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/preprocessing/SparseRandomProjector/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = preprocessing.SparseRandomProjector(\n    n_components=3,\n    seed=42\n)\n\nfor x, y in dataset:\n    x = model.transform_one(x)\n    print(x)\n    break\n
    {0: 92.89572746525327, 1: 1344540.5692342375, 2: 0}\n

    model = (\n    preprocessing.SparseRandomProjector(\n        n_components=5,\n        seed=42\n    ) |\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression()\n)\nevaluate.progressive_val_score(dataset, model, metrics.MAE())\n
    MAE: 1.292572\n
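
As a side note, here is a small sketch of the density rule of thumb mentioned above, computed for a hypothetical feature count:

import math\n\nn_features = 20  # assumed number of features, for illustration\n1 / math.sqrt(n_features)\n
0.2236...\n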

    "},{"location":"api/preprocessing/SparseRandomProjector/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    1. D. Achlioptas. 2003. Database-friendly random projections: Johnson-Lindenstrauss with binary coins. Journal of Computer and System Sciences 66 (2003) 671-687\u00a0\u21a9

    2. Ping Li, Trevor J. Hastie, and Kenneth W. Church. 2006. Very sparse random projections. In Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD'06). ACM, New York, NY, USA, 287-296.\u00a0\u21a9

    "},{"location":"api/preprocessing/StandardScaler/","title":"StandardScaler","text":"

    Scales the data so that it has zero mean and unit variance.

    Under the hood, a running mean and a running variance are maintained. The scaling is slightly different than when scaling the data in batch because the exact means and variances are not known in advance. However, this doesn't have a detrimental impact on performance in the long run.

    This transformer supports mini-batches as well as single instances. In the mini-batch case, the number of columns and the ordering of the columns are allowed to change between subsequent calls. In other words, this transformer will keep working even if you add and/or remove features every time you call learn_many and transform_many.

    "},{"location":"api/preprocessing/StandardScaler/#parameters","title":"Parameters","text":"
    • with_std

      Default \u2192 True

      Whether or not each feature should be divided by its standard deviation.

    "},{"location":"api/preprocessing/StandardScaler/#examples","title":"Examples","text":"

    import random\nfrom river import preprocessing\n\nrandom.seed(42)\nX = [{'x': random.uniform(8, 12), 'y': random.uniform(8, 12)} for _ in range(6)]\nfor x in X:\n    print(x)\n
    {'x': 10.557, 'y': 8.100}\n{'x': 9.100, 'y': 8.892}\n{'x': 10.945, 'y': 10.706}\n{'x': 11.568, 'y': 8.347}\n{'x': 9.687, 'y': 8.119}\n{'x': 8.874, 'y': 10.021}\n

    scaler = preprocessing.StandardScaler()\n\nfor x in X:\n    print(scaler.learn_one(x).transform_one(x))\n
    {'x': 0.0, 'y': 0.0}\n{'x': -0.999, 'y': 0.999}\n{'x': 0.937, 'y': 1.350}\n{'x': 1.129, 'y': -0.651}\n{'x': -0.776, 'y': -0.729}\n{'x': -1.274, 'y': 0.992}\n

    This transformer also supports mini-batch updates. You can call learn_many and provide a pandas.DataFrame:

    import pandas as pd\nX = pd.DataFrame.from_dict(X)\n\nscaler = preprocessing.StandardScaler()\nscaler = scaler.learn_many(X[:3])\nscaler = scaler.learn_many(X[3:])\n

    You can then call transform_many to scale a mini-batch of features:

    scaler.transform_many(X)\n
        x         y\n0  0.444600 -0.933384\n1 -1.044259 -0.138809\n2  0.841106  1.679208\n3  1.477301 -0.685117\n4 -0.444084 -0.914195\n5 -1.274664  0.992296\n

    "},{"location":"api/preprocessing/StandardScaler/#methods","title":"Methods","text":"learn_many

    Update with a mini-batch of features.

Note that the update formulas for the mean and variance are slightly different than in the single-instance case, but they produce exactly the same result; a short sketch at the end of this section illustrates this.

    Parameters

    • X \u2014 'pd.DataFrame'

    learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_many

    Scale a mini-batch of features.

    Parameters

    • X \u2014 'pd.DataFrame'

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.
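
The following sketch (ours, not from the original reference) illustrates the equivalence between single-instance and mini-batch updates:

import pandas as pd\nfrom river import preprocessing\n\nrows = [{'x': 1.0}, {'x': 2.0}, {'x': 3.0}]\n\none_by_one = preprocessing.StandardScaler()\nfor r in rows:\n    one_by_one = one_by_one.learn_one(r)\n\nall_at_once = preprocessing.StandardScaler().learn_many(pd.DataFrame(rows))\n\none_by_one.transform_one({'x': 2.0}), all_at_once.transform_one({'x': 2.0})\n
({'x': 0.0}, {'x': 0.0})\n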

    1. Welford's Method (and Friends) \u21a9

    2. Batch updates for simple statistics \u21a9

    "},{"location":"api/preprocessing/StatImputer/","title":"StatImputer","text":"

    Replaces missing values with a statistic.

    This transformer allows you to replace missing values with the value of a running statistic. During a call to learn_one, for each feature, a statistic is updated whenever a numeric feature is observed. When transform_one is called, each feature with a None value is replaced with the current value of the corresponding statistic.

    "},{"location":"api/preprocessing/StatImputer/#parameters","title":"Parameters","text":"
    • imputers

A list of tuples where each tuple has two elements. The first element is a feature name and the second element is an instance of stats.base.Univariate. The second element can also be an arbitrary value, such as -1, in which case the missing values will be replaced with it.

    "},{"location":"api/preprocessing/StatImputer/#examples","title":"Examples","text":"
    from river import preprocessing\nfrom river import stats\n

For numeric data, we can use a stats.Mean() to replace missing values with the running average of the previously seen values:

    X = [\n    {'temperature': 1},\n    {'temperature': 8},\n    {'temperature': 3},\n    {'temperature': None},\n    {'temperature': 4}\n]\n\nimp = preprocessing.StatImputer(('temperature', stats.Mean()))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'temperature': 1}\n{'temperature': 8}\n{'temperature': 3}\n{'temperature': 4.0}\n{'temperature': 4}\n

For discrete/categorical data, a common practice is to use stats.Mode to replace missing values with the most commonly seen value:

    X = [\n    {'weather': 'sunny'},\n    {'weather': 'rainy'},\n    {'weather': 'sunny'},\n    {'weather': None},\n    {'weather': 'rainy'},\n    {'weather': 'rainy'},\n    {'weather': None}\n]\n\nimp = preprocessing.StatImputer(('weather', stats.Mode()))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'sunny'}\n{'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n

    You can also choose to replace missing values with a constant value, as so:

    imp = preprocessing.StatImputer(('weather', 'missing'))\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny'}\n{'weather': 'rainy'}\n{'weather': 'sunny'}\n{'weather': 'missing'}\n{'weather': 'rainy'}\n{'weather': 'rainy'}\n{'weather': 'missing'}\n

    Multiple imputers can be defined by providing a tuple for each feature which you want to impute:

    X = [\n    {'weather': 'sunny', 'temperature': 8},\n    {'weather': 'rainy', 'temperature': 3},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': None, 'temperature': 4},\n    {'weather': 'snowy', 'temperature': -4},\n    {'weather': 'snowy', 'temperature': -3},\n    {'weather': 'snowy', 'temperature': -3},\n    {'weather': None, 'temperature': None}\n]\n\nimp = preprocessing.StatImputer(\n    ('temperature', stats.Mean()),\n    ('weather', stats.Mode())\n)\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny', 'temperature': 8}\n{'weather': 'rainy', 'temperature': 3}\n{'weather': 'sunny', 'temperature': 5.5}\n{'weather': 'sunny', 'temperature': 4}\n{'weather': 'snowy', 'temperature': -4}\n{'weather': 'snowy', 'temperature': -3}\n{'weather': 'snowy', 'temperature': -3}\n{'weather': 'snowy', 'temperature': 0.8333}\n

A sophisticated way to go about imputation is to condition the statistics on a given feature. For instance, we might want to replace a missing temperature with the average temperature of a particular weather condition. As an example, consider the following dataset where the temperature is missing, but not the weather condition:

    X = [\n    {'weather': 'sunny', 'temperature': 8},\n    {'weather': 'rainy', 'temperature': 3},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': 'rainy', 'temperature': 4},\n    {'weather': 'sunny', 'temperature': 10},\n    {'weather': 'sunny', 'temperature': None},\n    {'weather': 'sunny', 'temperature': 12},\n    {'weather': 'rainy', 'temperature': None}\n]\n

    Each missing temperature can be replaced with the average temperature of the corresponding weather condition as so:

    from river import compose\n\nimp = compose.Grouper(\n    preprocessing.StatImputer(('temperature', stats.Mean())),\n    by='weather'\n)\n\nfor x in X:\n    imp = imp.learn_one(x)\n    print(imp.transform_one(x))\n
    {'weather': 'sunny', 'temperature': 8}\n{'weather': 'rainy', 'temperature': 3}\n{'weather': 'sunny', 'temperature': 8.0}\n{'weather': 'rainy', 'temperature': 4}\n{'weather': 'sunny', 'temperature': 10}\n{'weather': 'sunny', 'temperature': 9.0}\n{'weather': 'sunny', 'temperature': 12}\n{'weather': 'rainy', 'temperature': 3.5}\n

    Note that you can also create a Grouper with the * operator:

    imp = preprocessing.StatImputer(('temperature', stats.Mean())) * 'weather'\n
    "},{"location":"api/preprocessing/StatImputer/#methods","title":"Methods","text":"learn_one

    Update with a set of features x.

A lot of transformers don't actually have to do anything during the learn_one step because they are stateless. For this reason, the default behavior of this function is to do nothing. However, transformers that do need to do something during the learn_one step can override this method.

    Parameters

    • x \u2014 'dict'

    Returns

    Transformer: self

    transform_one

    Transform a set of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    dict: The transformed values.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/","title":"TargetMinMaxScaler","text":"

    Applies min-max scaling to the target.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetMinMaxScaler(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15)\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 2.018905\n

    "},{"location":"api/preprocessing/TargetMinMaxScaler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/preprocessing/TargetStandardScaler/","title":"TargetStandardScaler","text":"

    Applies standard scaling to the target.

    "},{"location":"api/preprocessing/TargetStandardScaler/#parameters","title":"Parameters","text":"
    • regressor

      Type \u2192 base.Regressor

      Regression model to wrap.

    "},{"location":"api/preprocessing/TargetStandardScaler/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\nmodel = (\n    preprocessing.StandardScaler() |\n    preprocessing.TargetStandardScaler(\n        regressor=linear_model.LinearRegression(intercept_lr=0.15)\n    )\n)\nmetric = metrics.MSE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MSE: 2.005999\n

    "},{"location":"api/preprocessing/TargetStandardScaler/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x
    • y

    Returns

    self

    predict_one

    Predict the output of features x.

    Parameters

    • x

    Returns

    The prediction.

    "},{"location":"api/proba/Beta/","title":"Beta","text":"

    Beta distribution for binary data.

A Beta distribution is very similar to a Bernoulli distribution in that it counts occurrences of boolean events. The difference lies in what is being measured. A Binomial distribution models the probability of an event occurring, whereas a Beta distribution models the probability distribution itself. In other words, it's a probability distribution over probability distributions.

    "},{"location":"api/proba/Beta/#parameters","title":"Parameters","text":"
    • alpha

      Type \u2192 int

      Default \u2192 1

      Initial alpha parameter.

    • beta

      Type \u2192 int

      Default \u2192 1

      Initial beta parameter.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Beta/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/Beta/#examples","title":"Examples","text":"

    from river import proba\n\nsuccesses = 81\nfailures = 219\nbeta = proba.Beta(successes, failures)\n\nbeta(.21), beta(.35)\n
    (0.867..., 0.165...)\n

    for success in range(100):\n    beta = beta.update(True)\nfor failure in range(200):\n    beta = beta.update(False)\n\nbeta(.21), beta(.35)\n
    (2.525...e-05, 0.841...)\n

    beta.cdf(.35)\n
    0.994168...\n

    "},{"location":"api/proba/Beta/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • p \u2014 'float'

    cdf

Cumulative distribution function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'

    1. What is the intuition behind beta distribution? \u21a9

    "},{"location":"api/proba/Gaussian/","title":"Gaussian","text":"

    Normal distribution with parameters mu and sigma.

    "},{"location":"api/proba/Gaussian/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Gaussian/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • mu

    • n_samples

      The number of observed samples.

    • sigma

    "},{"location":"api/proba/Gaussian/#examples","title":"Examples","text":"

    from river import proba\n\np = proba.Gaussian().update(6).update(7)\n\np\n
    \ud835\udca9(\u03bc=6.500, \u03c3=0.707)\n

    p(6.5)\n
    0.564189\n

    p.revert(7)\n
    \ud835\udca9(\u03bc=6.000, \u03c3=0.000)\n

    "},{"location":"api/proba/Gaussian/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    cdf

Cumulative distribution function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'
    • w \u2014 defaults to 1.0

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'
    • w \u2014 defaults to 1.0

    "},{"location":"api/proba/Multinomial/","title":"Multinomial","text":"

    Multinomial distribution for categorical data.

    "},{"location":"api/proba/Multinomial/#parameters","title":"Parameters","text":"
    • events

      Type \u2192 dict | list | None

      Default \u2192 None

      An optional list of events that already occurred.

    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/Multinomial/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/Multinomial/#examples","title":"Examples","text":"

    from river import proba\n\np = proba.Multinomial(['green'] * 3)\np = p.update('red')\n\np('red')\n
    0.25\n

    p = p.update('red').update('red')\np('green')\n
    0.5\n

    p = p.revert('red').revert('red')\np('red')\n
    0.25\n

    You can wrap this with a utils.Rolling to measure a distribution over a window:

    from river import utils\n\nX = ['red', 'green', 'green', 'blue', 'blue']\n\ndist = utils.Rolling(\n    proba.Multinomial(),\n    window_size=3\n)\n\nfor x in X:\n    dist = dist.update(x)\n    print(dist)\n    print()\n
    P(red) = 1.000\n<BLANKLINE>\nP(red) = 0.500\nP(green) = 0.500\n<BLANKLINE>\nP(green) = 0.667\nP(red) = 0.333\n<BLANKLINE>\nP(green) = 0.667\nP(blue) = 0.333\nP(red) = 0.000\n<BLANKLINE>\nP(blue) = 0.667\nP(green) = 0.333\nP(red) = 0.000\n<BLANKLINE>\n

You can wrap this with a utils.TimeRolling to measure a distribution over a window of time:

    import datetime as dt\n\nX = ['red', 'green', 'green', 'blue']\ndays = [1, 2, 3, 4]\n\ndist = utils.TimeRolling(\n    proba.Multinomial(),\n    period=dt.timedelta(days=2)\n)\n\nfor x, day in zip(X, days):\n    dist = dist.update(x, t=dt.datetime(2019, 1, day))\n    print(dist)\n    print()\n
    P(red) = 1.000\n<BLANKLINE>\nP(red) = 0.500\nP(green) = 0.500\n<BLANKLINE>\nP(green) = 1.000\nP(red) = 0.000\n<BLANKLINE>\nP(green) = 0.500\nP(blue) = 0.500\nP(red) = 0.000\n<BLANKLINE>\n

    "},{"location":"api/proba/Multinomial/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'typing.Hashable'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'typing.Hashable'

    "},{"location":"api/proba/MultivariateGaussian/","title":"MultivariateGaussian","text":"

    Multivariate normal distribution with parameters mu and var.

    "},{"location":"api/proba/MultivariateGaussian/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/MultivariateGaussian/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • mu

      The mean value of the distribution.

    • n_samples

      The number of observed samples.

    • sigma

      The standard deviation of the distribution.

    • var

      The variance of the distribution.

    "},{"location":"api/proba/MultivariateGaussian/#examples","title":"Examples","text":"

    import numpy as np\nimport pandas as pd\nfrom river import proba\n\nnp.random.seed(42)\nX = pd.DataFrame(\n    np.random.random((8, 3)),\n    columns=[\"red\", \"green\", \"blue\"]\n)\nX\n
            red     green      blue\n0  0.374540  0.950714  0.731994\n1  0.598658  0.156019  0.155995\n2  0.058084  0.866176  0.601115\n3  0.708073  0.020584  0.969910\n4  0.832443  0.212339  0.181825\n5  0.183405  0.304242  0.524756\n6  0.431945  0.291229  0.611853\n7  0.139494  0.292145  0.366362\n

    p = proba.MultivariateGaussian(seed=42)\np.n_samples\n
    0.0\n

    for x in X.to_dict(orient=\"records\"):\n    p = p.update(x)\np.var\n
               blue     green       red\nblue   0.076119  0.020292 -0.010128\ngreen  0.020292  0.112931 -0.053268\nred   -0.010128 -0.053268  0.078961\n

Retrieving the current state in a nice format is simple:

    p\n
    \ud835\udca9(\n    \u03bc=(0.518, 0.387, 0.416),\n    \u03c3^2=(\n        [ 0.076  0.020 -0.010]\n        [ 0.020  0.113 -0.053]\n        [-0.010 -0.053  0.079]\n    )\n)\n

To retrieve the number of samples and the mode:

    p.n_samples\n
    8.0\n
    p.mode\n
    {'blue': 0.5179..., 'green': 0.3866..., 'red': 0.4158...}\n

    To retrieve the PDF and CDF:

    p(x)\n
    0.97967...\n
    p.cdf(x)\n
    0.00787...\n

To sample data from the distribution:

    p.sample()\n
    {'blue': -0.179..., 'green': -0.051..., 'red': 0.376...}\n

    MultivariateGaussian works with utils.Rolling:

from river import utils\n\np = utils.Rolling(proba.MultivariateGaussian(), window_size=5)\nfor x in X.to_dict(orient=\"records\"):\n    p = p.update(x)\np.var\n
               blue     green       red\nblue   0.087062 -0.022873  0.007765\ngreen -0.022873  0.014279 -0.025181\nred    0.007765 -0.025181  0.095066\n

    MultivariateGaussian works with utils.TimeRolling:

from datetime import datetime as dt, timedelta as td\nX.index = [dt(2023, 3, 28, 0, 0, 0) + td(seconds=x) for x in range(8)]\np = utils.TimeRolling(proba.MultivariateGaussian(), period=td(seconds=5))\nfor t, x in X.iterrows():\n    p = p.update(x.to_dict(), t=t)\np.var\n
               blue     green       red\nblue   0.087062 -0.022873  0.007765\ngreen -0.022873  0.014279 -0.025181\nred    0.007765 -0.025181  0.095066\n

The variance on the diagonal is consistent with proba.Gaussian.

    multi = proba.MultivariateGaussian()\nsingle = proba.Gaussian()\nfor x in X.to_dict(orient='records'):\n    multi = multi.update(x)\n    single = single.update(x['blue'])\nmulti.mu['blue'] == single.mu\n
    True\n
    multi.sigma['blue']['blue'] == single.sigma\n
    True\n

    "},{"location":"api/proba/MultivariateGaussian/#methods","title":"Methods","text":"call

    PDF(x) method.

    Parameters

    • x \u2014 'dict[str, float]'

    cdf

    Cumulative distribution function, i.e. P(X <= x).

    Parameters

    • x \u2014 'dict[str, float]'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'dict[str, float]'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'dict[str, float]'

    "},{"location":"api/proba/base/BinaryDistribution/","title":"BinaryDistribution","text":"

    A probability distribution for binary values.

    "},{"location":"api/proba/base/BinaryDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/BinaryDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/BinaryDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'bool'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'bool'
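
    River's concrete distributions mostly cover the continuous and discrete cases, so as an illustration of this interface, here is a hypothetical toy Bernoulli distribution that follows the methods listed above. It is a sketch, not part of the library:

    import random

    class ToyBernoulli:
        """Hypothetical Bernoulli following the BinaryDistribution interface."""

        def __init__(self, seed=None):
            self.n_true = 0
            self.n_samples = 0
            self._rng = random.Random(seed)

        @property
        def mode(self):
            # True if True is the majority outcome seen so far
            return self.n_true * 2 > self.n_samples

        def update(self, x: bool):
            self.n_true += x
            self.n_samples += 1
            return self

        def revert(self, x: bool):
            self.n_true -= x
            self.n_samples -= 1
            return self

        def __call__(self, x: bool):
            # Probability mass function
            p = self.n_true / self.n_samples
            return p if x else 1 - p

        def sample(self):
            return self._rng.random() < self.n_true / self.n_samples

    d = ToyBernoulli(seed=42)
    for x in [True, True, False]:
        d = d.update(x)
    d(True)  # 0.666...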

    "},{"location":"api/proba/base/ContinuousDistribution/","title":"ContinuousDistribution","text":"

    A probability distribution for continuous values.

    "},{"location":"api/proba/base/ContinuousDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/ContinuousDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/ContinuousDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    cdf

    Cumulative distribution function, i.e. P(X <= x).

    Parameters

    • x \u2014 'float'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'float'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'float'
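
    As an illustration, proba.Gaussian is a concrete ContinuousDistribution. A minimal sketch of the interface described above:

    from river import proba

    p = proba.Gaussian()
    for x in [1.0, 2.0, 3.0]:
        p = p.update(x)

    p(2.0)      # probability density at x = 2
    p.cdf(2.0)  # P(X <= 2), which is 0.5 here since the mean is 2
    p.sample()  # draw a random value from the fitted distribution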

    "},{"location":"api/proba/base/DiscreteDistribution/","title":"DiscreteDistribution","text":"

    A probability distribution for discrete values.

    "},{"location":"api/proba/base/DiscreteDistribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/DiscreteDistribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/DiscreteDistribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    revert

    Reverts the parameters of the distribution for a given observation.

    Parameters

    • x \u2014 'typing.Hashable'

    sample

    Sample a random value from the distribution.

    update

    Updates the parameters of the distribution given a new observation.

    Parameters

    • x \u2014 'typing.Hashable'
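
    As an illustration, proba.Multinomial is a concrete DiscreteDistribution. A minimal sketch:

    from river import proba

    p = proba.Multinomial()
    for x in ['cat', 'cat', 'dog']:
        p = p.update(x)

    p('cat')    # 0.666..., the relative frequency of 'cat'
    p.mode      # 'cat'
    p.sample()  # draws 'cat' or 'dog' with the fitted probabilities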

    "},{"location":"api/proba/base/Distribution/","title":"Distribution","text":"

    General distribution.

    "},{"location":"api/proba/base/Distribution/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generator seed for reproducibility.

    "},{"location":"api/proba/base/Distribution/#attributes","title":"Attributes","text":"
    • mode

      The most likely value in the distribution.

    • n_samples

      The number of observed samples.

    "},{"location":"api/proba/base/Distribution/#methods","title":"Methods","text":"call

    Probability mass/density function.

    Parameters

    • x \u2014 'typing.Any'

    sample

    Sample a random value from the distribution.

    "},{"location":"api/reco/Baseline/","title":"Baseline","text":"

    Baseline for recommender systems.

    A first-order approximation of the bias involved in the target. The model equation is defined as:

    \\[\\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i}\\]

    Where \\(bu_{u}\\) and \\(bi_{i}\\) are respectively the user and item biases.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/Baseline/#parameters","title":"Parameters","text":"
    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the weights.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/Baseline/#attributes","title":"Attributes","text":"
    • global_mean (stats.Mean)

      The target arithmetic mean.

    • u_biases (collections.defaultdict)

      The user bias weights.

    • i_biases (collections.defaultdict)

      The item bias weights.

    • u_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user bias weights.

    • i_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item bias weights.

    "},{"location":"api/reco/Baseline/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.Baseline(optimizer=optim.SGD(0.005))\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.538120\n

    "},{"location":"api/reco/Baseline/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/BiasedMF/","title":"BiasedMF","text":"

    Biased Matrix Factorization for recommender systems.

    The model equation is defined as:

    \\[\\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i} + \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle\\]

    Where \\(bu_{u}\\) and \\(bi_{i}\\) are respectively the user and item biases. The last term being simply the dot product between the latent vectors of the given user-item pair:

    \\[\\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f}\\]

    where \\(k\\) is the number of latent factors.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/BiasedMF/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • bias_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the bias weights.

    • latent_optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent weights.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2_bias

      Default \u2192 0.0

      Amount of L2 regularization used to push bias weights towards 0.

    • l2_latent

      Default \u2192 0.0

      Amount of L2 regularization used to push latent weights towards 0.

    • weight_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Weights initialization scheme.

    • latent_initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/BiasedMF/#attributes","title":"Attributes","text":"
    • global_mean (stats.Mean)

      The target arithmetic mean.

    • u_biases (collections.defaultdict)

      The user bias weights.

    • i_biases (collections.defaultdict)

      The item bias weights.

    • u_latents (collections.defaultdict)

      The user latent vectors randomly initialized.

    • i_latents (collections.defaultdict)

      The item latent vectors randomly initialized.

    • u_bias_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user bias weights.

    • i_bias_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item bias weights.

    • u_latent_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user latent weights.

    • i_latent_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item latent weights.

    "},{"location":"api/reco/BiasedMF/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.BiasedMF(\n    n_factors=10,\n    bias_optimizer=optim.SGD(0.025),\n    latent_optimizer=optim.SGD(0.025),\n    latent_initializer=optim.initializers.Normal(mu=0., sigma=0.1, seed=71)\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.489025\n

    "},{"location":"api/reco/BiasedMF/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Paterek, A., 2007, August. Improving regularized singular value decomposition for collaborative filtering. In Proceedings of KDD cup and workshop (Vol. 2007, pp. 5-8) \u21a9

    2. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/FunkMF/","title":"FunkMF","text":"

    Funk Matrix Factorization for recommender systems.

    The model equation is defined as:

    \\[\\hat{y}(x) = \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f}\\]

    where \\(k\\) is the number of latent factors.

    This model expects a dict input with user and item entries, without any type constraint on their values (i.e. they can be strings or numbers). Other entries are ignored.

    "},{"location":"api/reco/FunkMF/#parameters","title":"Parameters","text":"
    • n_factors

      Default \u2192 10

      Dimensionality of the factorization or number of latent factors.

    • optimizer

      Type \u2192 optim.base.Optimizer | None

      Default \u2192 None

      The sequential optimizer used for updating the latent factors.

    • loss

      Type \u2192 optim.losses.Loss | None

      Default \u2192 None

      The loss function to optimize for.

    • l2

      Default \u2192 0.0

      Amount of L2 regularization used to push weights towards 0.

    • initializer

      Type \u2192 optim.initializers.Initializer | None

      Default \u2192 None

      Latent factors initialization scheme.

    • clip_gradient

      Default \u2192 1000000000000.0

      Clips the absolute value of each gradient value.

    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/FunkMF/#attributes","title":"Attributes","text":"
    • u_latents (collections.defaultdict)

      The user latent vectors randomly initialized.

    • i_latents (collections.defaultdict)

      The item latent vectors randomly initialized.

    • u_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the user latent weights.

    • i_optimizer (optim.base.Optimizer)

      The sequential optimizer used for updating the item latent weights.

    "},{"location":"api/reco/FunkMF/#examples","title":"Examples","text":"

    from river import optim\nfrom river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.FunkMF(\n    n_factors=10,\n    optimizer=optim.SGD(0.1),\n    initializer=optim.initializers.Normal(mu=0., sigma=0.1, seed=11),\n)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    1.866272\n

    "},{"location":"api/reco/FunkMF/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Netflix update: Try this at home \u21a9

    2. Matrix factorization techniques for recommender systems \u21a9

    "},{"location":"api/reco/RandomNormal/","title":"RandomNormal","text":"

    Predicts random values sampled from a normal distribution.

    The parameters of the normal distribution are fitted with running statistics. The parameters are independent of the user, the item, and the context; they are instead fitted globally. This recommender therefore acts as a dummy model that any serious model should easily outperform.

    "},{"location":"api/reco/RandomNormal/#parameters","title":"Parameters","text":"
    • seed

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/RandomNormal/#attributes","title":"Attributes","text":"
    • mean

      stats.Mean

    • variance

      stats.Var

    "},{"location":"api/reco/RandomNormal/#examples","title":"Examples","text":"

    from river import reco\n\ndataset = (\n    ({'user': 'Alice', 'item': 'Superman'}, 8),\n    ({'user': 'Alice', 'item': 'Terminator'}, 9),\n    ({'user': 'Alice', 'item': 'Star Wars'}, 8),\n    ({'user': 'Alice', 'item': 'Notting Hill'}, 2),\n    ({'user': 'Alice', 'item': 'Harry Potter'}, 5),\n    ({'user': 'Bob', 'item': 'Superman'}, 8),\n    ({'user': 'Bob', 'item': 'Terminator'}, 9),\n    ({'user': 'Bob', 'item': 'Star Wars'}, 8),\n    ({'user': 'Bob', 'item': 'Notting Hill'}, 2)\n)\n\nmodel = reco.RandomNormal(seed=42)\n\nfor x, y in dataset:\n    _ = model.learn_one(**x, y=y)\n\nmodel.predict_one(user='Bob', item='Harry Potter')\n
    6.147299621751425\n

    "},{"location":"api/reco/RandomNormal/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None

    "},{"location":"api/reco/base/Ranker/","title":"Ranker","text":"

    Base class for ranking models.

    "},{"location":"api/reco/base/Ranker/#parameters","title":"Parameters","text":"
    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random number generation seed. Set this for reproducibility.

    "},{"location":"api/reco/base/Ranker/#attributes","title":"Attributes","text":"
    • is_contextual
    "},{"location":"api/reco/base/Ranker/#methods","title":"Methods","text":"learn_one

    Fits a user-item pair and a real-valued target y.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • y \u2014 'Reward'
    • x \u2014 'dict | None' \u2014 defaults to None

    predict_one

    Predicts the target value of a set of features x.

    Parameters

    • user \u2014 'ID'
    • item \u2014 'ID'
    • x \u2014 'dict | None' \u2014 defaults to None

    Returns

    Reward: The predicted preference from the user for the item.

    rank

    Rank items by decreasing order of preference for a given user.

    Parameters

    • user \u2014 'ID'
    • items \u2014 'set[ID]'
    • x \u2014 'dict | None' \u2014 defaults to None
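
    Any concrete recommender, such as reco.RandomNormal, implements this interface. A minimal sketch of rank follows; the resulting order is only illustrative here, since RandomNormal predicts random values:

    from river import reco

    dataset = (
        ({'user': 'Alice', 'item': 'Superman'}, 8),
        ({'user': 'Alice', 'item': 'Notting Hill'}, 2),
    )

    model = reco.RandomNormal(seed=42)
    for x, y in dataset:
        model.learn_one(**x, y=y)

    # Items sorted by decreasing predicted preference for the user
    model.rank(user='Alice', items={'Superman', 'Notting Hill'})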

    "},{"location":"api/rules/AMRules/","title":"AMRules","text":"

    Adaptive Model Rules.

    AMRules1 is a rule-based algorithm for incremental regression tasks. AMRules relies on the Hoeffding bound to build its rule set, similarly to Hoeffding Trees. The Variance-Ratio heuristic is used to evaluate rules' splits. Moreover, this rule-based regressor has additional capacities not usually found in decision trees.

    Firstly, each created decision rule has a built-in drift detection mechanism. Every time a drift is detected, the affected decision rule is removed. In addition, AMRules' rules also have anomaly detection capabilities. After a warm-up period, each rule tests whether or not the incoming instances are anomalies. Anomalous instances are not used for training.

    Whenever no rule covers an incoming example, a default rule is used to learn from it. A rule covers an instance when all of the rule's literals (tests joined by the logical AND operation) match the input case. The default rule is also used to predict examples not covered by any rule from the rule set.

    "},{"location":"api/rules/AMRules/#parameters","title":"Parameters","text":"
    • n_min

      Type \u2192 int

      Default \u2192 200

      The total weight that must be observed by a rule between expansion attempts.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      The split test significance. The split confidence is given by 1 - delta.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      The tie-breaking threshold.

    • pred_type

      Type \u2192 str

      Default \u2192 adaptive

      The prediction strategy used by the decision rules. Can be either: - \"mean\": outputs the target mean within the partitions defined by the decision rules. - \"model\": always use instances of the model passed pred_model to make predictions. - \"adaptive\": dynamically selects between \"mean\" and \"model\" for each incoming example. The most accurate option at the moment will be used.

    • pred_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The regression model that will be replicated for every rule when pred_type is either \"model\" or \"adaptive\".

    • splitter

      Type \u2192 spl.Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detection model that is used by each rule. Care must be taken to avoid triggering too many false alarms or delaying concept drift detection for too long. By default, drift.ADWIN is used if drift_detector is None.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.99

      The exponential decay factor applied to the learning models' absolute errors, which are monitored if pred_type='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. Conversely, as its value approaches 0, recent errors have more influence on the final decision.

    • anomaly_threshold

      Type \u2192 float

      Default \u2192 -0.75

      The threshold below which instances will be considered anomalies by the rules.

    • m_min

      Type \u2192 int

      Default \u2192 30

      The minimum total weight a rule must observe before it starts to skip anomalous instances during training.

    • ordered_rule_set

      Type \u2192 bool

      Default \u2192 True

      If True, only the first rule that covers an instance will be used for training or prediction. If False, all the rules covering an instance will be updated during training, and the predictions for an instance will be the average prediction of all rules covering that example.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples each partition of a binary split candidate must have to be considered valid.

    "},{"location":"api/rules/AMRules/#attributes","title":"Attributes","text":"
    • n_drifts_detected

      The number of detected concept drifts.

    "},{"location":"api/rules/AMRules/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import drift\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing\nfrom river import rules\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    rules.AMRules(\n        delta=0.01,\n        n_min=50,\n        drift_detector=drift.ADWIN()\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.119553\n

    "},{"location":"api/rules/AMRules/#methods","title":"Methods","text":"anomaly_score

    Aggregated anomaly score computed using all the rules that cover the input instance.

    Returns the mean anomaly score, the standard deviation of the score, and the proportion of rules that cover the instance (support). If the support is zero, it means that the default rule was used (no other rule covered x).

    Parameters

    • x

    Returns

    tuple[float, float, float]: mean_anomaly_score, std_anomaly_score, support

    debug_one

    Return an explanation of how x is predicted.

    Parameters

    • x

    Returns

    str: A representation of the rules that cover the input and their prediction.

    learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 'int' \u2014 defaults to 1

    Returns

    AMRules: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.
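
    As a quick, hedged illustration of anomaly_score and debug_one, reusing the same dataset as in the example above:

    from river import datasets, rules

    model = rules.AMRules(n_min=50)
    for x, y in datasets.TrumpApproval().take(300):
        model.learn_one(x, y)

    # Mean score, score standard deviation, and support for the last instance
    mean_score, std_score, support = model.anomaly_score(x)

    # Textual description of the rules covering the instance
    print(model.debug_one(x))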

    "},{"location":"api/rules/AMRules/#notes","title":"Notes","text":"

    AMRules treats all the non-numerical inputs as nominal features. All instances of numbers.Number will be treated as continuous, even if they represent integer categories. When using nominal features, pred_type should be set to \"mean\", otherwise errors will be thrown while trying to update the underlying rules' prediction models. Prediction strategies other than \"mean\" can be used, as long as the prediction model passed to pred_model supports nominal features.

    1. Duarte, J., Gama, J. and Bifet, A., 2016. Adaptive model rules from high-speed data streams. ACM Transactions on Knowledge Discovery from Data (TKDD), 10(3), pp.1-22.\u00a0\u21a9

    "},{"location":"api/sketch/Counter/","title":"Counter","text":"

    Counting using the Count-Min Sketch (CMS) algorithm.

    Contrary to an exhaustive approach, e.g., using a collections.Counter, CMS uses a limited and fixed amount of memory. The CMS algorithm uses a sketch structure consisting of a matrix \\(w \\times d\\).

    These dimensions are obtained via:

    • \\(w = \\lceil \\frac{e}{\\epsilon} \\rceil\\), where \\(e\\) is the Euler number.

    • \\(d = \\lceil \\ln\\left(\\frac{1}{\\delta} \\right) \\rceil\\).

    Decreasing the values of \(\epsilon\) (epsilon) and \(\delta\) (delta) increases the accuracy of the algorithm, at the cost of increased memory usage. The values of w and d control the capacity of the hash tables and the amount of hash collisions, respectively.

    CMS works by keeping d hash tables with w slots each. Elements are mapped to a slot in each hash table. These tables store the counting estimates. This implementation assumes the turnstile case described in the paper, i.e., count values and updates can be negative.

    The count values obtained by CMS are always overestimates. Suppose \\(c_i\\) and \\(\\hat{c}_i\\) are the ground truth and estimated count values, respectively, for a given element \\(i\\). CMS guarantees that \\(c_i \\le \\hat{c}_i\\) and, with probability \\(1 - \\delta\\), \\(\\hat{c}_i \\le c_i + \\epsilon||\\mathbf{c}||_1\\). In the expression, \\(||\\mathbf{c}||_1 = \\sum_i |c_i|\\).
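
    These formulas are easy to check by hand. With epsilon=0.005 and delta=0.05, the values used in the example below, they give w = 544 slots and d = 3 tables:

    import math

    epsilon, delta = 0.005, 0.05
    w = math.ceil(math.e / epsilon)     # 544 slots per hash table
    d = math.ceil(math.log(1 / delta))  # 3 hash tables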

    "},{"location":"api/sketch/Counter/#parameters","title":"Parameters","text":"
    • epsilon

      Type \u2192 float

      Default \u2192 0.1

      The approximation error parameter. The error in answering a query is within a factor of epsilon with probability 1 - delta.

    • delta

      Type \u2192 float

      Default \u2192 0.05

      A query estimate has a probability of 1 - delta of having an error within a factor of epsilon. See the CMS description above for more details.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/sketch/Counter/#attributes","title":"Attributes","text":"
    • n_slots

      The number of slots in each hash table.

    • n_tables

      The number of stored hash tables.

    "},{"location":"api/sketch/Counter/#examples","title":"Examples","text":"
    import collections\nimport math\nimport random\nfrom river import sketch\n\ncms = sketch.Counter(epsilon=0.005, seed=0)\n\nrng = random.Random(7)\n\ncounter = collections.Counter()\n

    We can check the number of slots per hash table:

    cms.n_slots\n
    544\n

    And the number of hash tables:

    cms.n_tables\n
    3\n

    Let's compare the sketch against a brute force approach:

    vals = []\nfor _ in range(10000):\n    v = rng.randint(-1000, 1000)\n    cms = cms.update(v)\n    counter[v] += 1\n    vals.append(v)\n

    Now, we can compare the estimates of CMS against the exhaustive counting strategy:

    counter[7]\n
    5\n
    cms[7]\n
    12\n
    counter[532]\n
    4\n
    cms[532]\n
    15\n

    Keep in mind that CMS is an approximate sketch algorithm. Counting estimates for unseen values might not always be reliable:

    cms[1001]\n
    9\n

    We can check the number of elements stored by each approach:

    len(counter), len(cms)\n
    (1982, 1632)\n

    And also retrieve the total sum of counts:

    cms.total()\n
    10000\n

    We can decrease the error by allocating more memory in the CMS:

    cms_a = sketch.Counter(epsilon=0.001, delta=0.01, seed=0)\nfor v in vals:\n    cms_a = cms_a.update(v)\n\ncms_a[7]\n
    5\n
    cms_a[532]\n
    4\n

    We can also obtain estimates of the dot product between two instances of sketch.Counter. This could be useful, for instance, to estimate the cosine distance between the data monitored in two different counter sketch instances. Suppose we create another CMS instance (the number of slots and hash tables must match) that monitors another sample of the same data generating process:

    cms_b = sketch.Counter(epsilon=0.001, delta=0.01, seed=7)\n\nfor _ in range(10000):\n    v = rng.randint(-1000, 1000)\n    cms_b = cms_b.update(v)\n

    Now, we can define a cosine distance function:

    def cosine_dist(cms_a, cms_b):\n    num = cms_a @ cms_b\n    den = math.sqrt(cms_a @ cms_a) * math.sqrt(cms_b @ cms_b)\n    return num / den\n

    And use it to calculate the cosine distance between the elements monitored in cms_a and cms_b:

    cosine_dist(cms_a, cms_b)\n
    0.175363...\n

    "},{"location":"api/sketch/Counter/#methods","title":"Methods","text":"total

    Return the total count.

    update
    1. Cormode, G., & Muthukrishnan, S. (2005). An improved data stream summary: the count-min sketch and its applications. Journal of Algorithms, 55(1), 58-75. \u21a9

    2. Count-Min Sketch \u21a9

    3. Hash functions family generator in Python \u21a9

    "},{"location":"api/sketch/HeavyHitters/","title":"HeavyHitters","text":"

    Find the Heavy Hitters using the Lossy Count with Forgetting factor algorithm1.

    Keep track of the most frequent item(set)s in a data stream and apply a forgetting factor to discard previously frequent items that no longer appear often. This is an approximation algorithm designed to work within a limited amount of memory, rather than accounting for every possible solution (which would require an unbounded memory footprint). Any hashable type can be passed as input, hence tuples or frozensets can also be monitored.

    Considering a data stream where n elements were observed so far, the Lossy Count algorithm has the following properties:

    • All item(set)s whose true frequency exceeds support * n are output. There are no false negatives;

    • No item(set) whose true frequency is less than (support - epsilon) * n is output;

    • Estimated frequencies are less than the true frequencies by at most epsilon * n.

    "},{"location":"api/sketch/HeavyHitters/#parameters","title":"Parameters","text":"
    • support

      Type \u2192 float

      Default \u2192 0.001

      The support threshold used to determine if an item is frequent. The value of support must be in \\([0, 1]\\). Elements whose frequency is higher than support times the number of observations seen so far are outputted.

    • epsilon

      Type \u2192 float

      Default \u2192 0.005

      Error parameter to control the accuracy-memory tradeoff. The value of epsilon must be in \\((0, 1]\\) and typically epsilon \\(\\ll\\) support. The smaller the epsilon, the more accurate the estimates will be, but the count sketch will have an increased memory footprint.

    • fading_factor

      Type \u2192 float

      Default \u2192 0.999

      Forgetting factor applied to the frequency estimates to reduce the impact of old items. The value of fading_factor must be in \\((0, 1]\\).

    "},{"location":"api/sketch/HeavyHitters/#examples","title":"Examples","text":"
    import random\nimport string\nfrom river import sketch\n\nrng = random.Random(42)\nhh = sketch.HeavyHitters()\n

    We will feed the counter with printable ASCII characters:

    for _ in range(10_000):\n    hh = hh.update(rng.choice(string.printable))\n

    We can retrieve estimates of the n top elements and their frequencies. Let's try n=3:

    hh.most_common(3)\n
    [(',', 122.099142...), ('[', 116.049510...), ('W', 115.013402...)]\n

    We can also access estimates of individual elements:

    hh['A']\n
    99.483575...\n

    Unobserved elements are handled just fine:

    hh[(1, 2, 3)]\n
    0.0\n

    "},{"location":"api/sketch/HeavyHitters/#methods","title":"Methods","text":"most_common update
    1. Veloso, B., Tabassum, S., Martins, C., Espanha, R., Azevedo, R., & Gama, J. (2020). Interconnect bypass fraud detection: a case study. Annals of Telecommunications, 75(9), 583-596.\u00a0\u21a9

    "},{"location":"api/sketch/Histogram/","title":"Histogram","text":"

    Streaming histogram.

    "},{"location":"api/sketch/Histogram/#parameters","title":"Parameters","text":"
    • max_bins

      Default \u2192 256

      Maximal number of bins.

    "},{"location":"api/sketch/Histogram/#attributes","title":"Attributes","text":"
    • n

      Total number of seen values.

    "},{"location":"api/sketch/Histogram/#examples","title":"Examples","text":"

    from river import sketch\nimport numpy as np\n\nnp.random.seed(42)\n\nvalues = np.hstack((\n    np.random.normal(-3, 1, 1000),\n    np.random.normal(3, 1, 1000),\n))\n\nhist = sketch.Histogram(max_bins=15)\n\nfor x in values:\n    hist = hist.update(x)\n\nfor bin in hist:\n    print(bin)\n
    [-6.24127, -6.24127]: 1\n[-5.69689, -5.19881]: 8\n[-5.12390, -4.43014]: 57\n[-4.42475, -3.72574]: 158\n[-3.71984, -3.01642]: 262\n[-3.01350, -2.50668]: 206\n[-2.50329, -0.81020]: 294\n[-0.80954, 0.29677]: 19\n[0.40896, 0.82733]: 7\n[0.84661, 1.25147]: 24\n[1.26029, 2.30758]: 178\n[2.31081, 3.05701]: 284\n[3.05963, 3.69695]: 242\n[3.69822, 5.64434]: 258\n[6.13775, 6.19311]: 2\n

    "},{"location":"api/sketch/Histogram/#methods","title":"Methods","text":"cdf

    Cumulative distribution function.

    Parameters

    • x

    iter_cdf

    Yields CDF values for a sorted iterable of values.

    This is faster than calling cdf with many values.

    Parameters

    • X
    • verbose \u2014 defaults to False
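
    Continuing the example above, a short sketch of both methods; the exact values depend on the fitted bins:

    hist.cdf(0)  # fraction of observed values <= 0, close to 0.5 for this data

    # iter_cdf expects the query values to be sorted
    for x, q in zip([-3, 0, 3], hist.iter_cdf([-3, 0, 3])):
        print(x, q)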

    1. Ben-Haim, Y. and Tom-Tov, E., 2010. A streaming parallel decision tree algorithm. Journal of Machine Learning Research, 11(Feb), pp.849-872. \u21a9

    2. Go implementation \u21a9

    "},{"location":"api/sketch/Set/","title":"Set","text":"

    Approximate tracking of observed items using Bloom filters.

    Bloom filters enable using a limited amount of memory to check whether a given item was already observed in a stream. They can be used similarly to Python's built-in sets with the difference that items are not explicitly stored. For that reason, element removal and set difference are not currently supported.

    Bloom filters store a bit array and map incoming items to k index positions in that array. The selected positions are set to True, creating a binary code representation for each item. Membership works by projecting the query item and checking whether every position of its binary code is True. If that is not the case, the item has not been observed yet. A nice property of Bloom filters is that they do not yield false negatives: unobserved items might be flagged as observed, but observed items are never flagged as unobserved.

    If more than one item has the same binary code, i.e., hash collisions happen, the accuracy of the Bloom filter decreases and false positives are produced: a previously unobserved item may be flagged as observed. Increasing the size of the binary array and the value of k increases the filter's accuracy, as hash collisions are avoided. Nonetheless, even with an increased number of hash functions, hash collisions will frequently happen if the array capacity is too small. The length of the bit array and the number of hash functions are inferred automatically from the supplied capacity and fp_rate.

    "},{"location":"api/sketch/Set/#parameters","title":"Parameters","text":"
    • capacity

      Type \u2192 int

      Default \u2192 2048

      The maximum capacity of the Bloom filter, i.e., the maximum number of distinct items to store given the selected fp_rate.

    • fp_rate

      Type \u2192 float

      Default \u2192 0.01

      The allowed rate of false positives. The probability of obtaining a true positive is 1 - fp_rate.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/sketch/Set/#attributes","title":"Attributes","text":"
    • n_bits

      Return the size of the binary array used by the Bloom filter.

    • n_hash

      Return the number of used hash functions.

    "},{"location":"api/sketch/Set/#examples","title":"Examples","text":"
    import random\nfrom river import sketch\n\nrng = random.Random(42)\ns_set = sketch.Set(capacity=100, seed=0)\n

    We can retrieve the number of selected hash functions:

    s_set.n_hash\n
    7\n

    And the size of the binary array used by the Bloom filter:

    s_set.n_bits\n
    959\n

    We can add new items and check for membership using the same calls used by Python's standard sets:

    for _ in range(1000):\n    s_set.add(rng.randint(0, 200))\n\n1 in s_set\n
    True\n

    False positives might happen if the capacity is not large enough:

    -10 in s_set\n
    True\n

    Iterables can also be supplied to perform multiple updates with a single call to update:

    s_set = s_set.update([1, 2, 3, 4, 5, 6, 7])\n

    We can also combine instances of sketch.Set using the intersection and union operations, as long as they share the same hash functions and capacity. In other words, all their hyperparameters must match. Let's create two instances that will monitor different portions of a stream of random numbers:

    s1 = sketch.Set(seed=8)\ns2 = sketch.Set(seed=8)\n\nfor _ in range(1000):\n    s1.add(rng.randint(0, 5000))\n\nfor _ in range(1000):\n    s2.add(rng.randint(0, 5000))\n\n43 in s1\n
    True\n
    43 in s2\n
    False\n

    We can get the intersection between the two instances by using:

    s_intersection = s1 & s2\n43 in s_intersection\n
    False\n

    We can also obtain the set union:

    s_union = s1 | s2\n\n43 in s_union\n
    True\n

    The same effect of the non-inplace dunder methods can be achieved via explicit method calls:

    43 in s1.intersection(s2)\n
    False\n

    43 in s1.union(s2)\n
    True\n

    "},{"location":"api/sketch/Set/#methods","title":"Methods","text":"add intersection

    Set intersection.

    Return a new instance that results from the set intersection between the current Set object and other. Dunder operators can be used to replace the method call, i.e., a &= b and a & b for inplace and non-inplace intersections, respectively.

    Parameters

    • other \u2014 'Set'

    union

    Set union.

    Return a new instance that results from the set union between the current Set object and other. Dunder operators can be used to replace the method call, i.e., a |= b and a | b for inplace and non-inplace unions, respectively.

    Parameters

    • other \u2014 'Set'

    update"},{"location":"api/sketch/Set/#notes","title":"Notes","text":"

    This implementation uses an integer to represent the binary array. Bitwise operations are performed on the integer to reflect the Bloom filter updates.

    1. Florian Hartmann's blog article on Bloom Filters.\u00a0\u21a9

    2. Wikipedia entry on Bloom filters.\u00a0\u21a9

    "},{"location":"api/stats/AbsMax/","title":"AbsMax","text":"

    Running absolute max.

    "},{"location":"api/stats/AbsMax/#attributes","title":"Attributes","text":"
    • abs_max (float)

      The current absolute max.

    "},{"location":"api/stats/AbsMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 5, -6]\nabs_max = stats.AbsMax()\nfor x in X:\n    print(abs_max.update(x).get())\n
    1\n4\n4\n4\n5\n6\n

    "},{"location":"api/stats/AbsMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/AutoCorr/","title":"AutoCorr","text":"

    Measures the serial correlation.

    This method computes the Pearson correlation between the current value and the value seen n steps before.

    "},{"location":"api/stats/AutoCorr/#parameters","title":"Parameters","text":"
    • lag

      Type \u2192 int

    "},{"location":"api/stats/AutoCorr/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/AutoCorr/#examples","title":"Examples","text":"

    The following examples are taken from the pandas documentation.

    from river import stats\n\nauto_corr = stats.AutoCorr(lag=1)\nfor x in [0.25, 0.5, 0.2, -0.05]:\n    print(auto_corr.update(x).get())\n
    0\n0\n-1.0\n0.103552\n

    auto_corr = stats.AutoCorr(lag=2)\nfor x in [0.25, 0.5, 0.2, -0.05]:\n    print(auto_corr.update(x).get())\n
    0\n0\n0\n-1.0\n

    auto_corr = stats.AutoCorr(lag=1)\nfor x in [1, 0, 0, 0]:\n    print(auto_corr.update(x).get())\n
    0\n0\n0\n0\n

    "},{"location":"api/stats/AutoCorr/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/BayesianMean/","title":"BayesianMean","text":"

    Estimates a mean using outside information.

    "},{"location":"api/stats/BayesianMean/#parameters","title":"Parameters","text":"
    • prior

      Type \u2192 float

    • prior_weight

      Type \u2192 float

    "},{"location":"api/stats/BayesianMean/#attributes","title":"Attributes","text":"
    • name
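
    There is no example in this entry, so here is a minimal sketch. It assumes the usual Bayesian-average behavior, where the estimate starts at the prior and converges to the sample mean as observations accumulate:

    from river import stats

    mean = stats.BayesianMean(prior=3.0, prior_weight=1.0)
    for x in [1.0, 2.0, 3.0]:
        mean = mean.update(x)

    # Pulled from the sample mean of 2.0 toward the prior of 3.0
    mean.get()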
    "},{"location":"api/stats/BayesianMean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Additive smoothing \u21a9

    2. Bayesian average \u21a9

    3. Practical example of Bayes estimators \u21a9

    "},{"location":"api/stats/Count/","title":"Count","text":"

    A simple counter.

    "},{"location":"api/stats/Count/#attributes","title":"Attributes","text":"
    • n (int)

      The current number of observations.
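
    A minimal sketch:

    from river import stats

    count = stats.Count()
    for x in [1, 2, 3]:
        count = count.update(x)

    count.get()  # 3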

    "},{"location":"api/stats/Count/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number' \u2014 defaults to None

    "},{"location":"api/stats/Cov/","title":"Cov","text":"

    Covariance.

    "},{"location":"api/stats/Cov/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/stats/Cov/#attributes","title":"Attributes","text":"
    • n
    "},{"location":"api/stats/Cov/#examples","title":"Examples","text":"

    from river import stats\n\nx = [-2.1,  -1,  4.3]\ny = [   3, 1.1, 0.12]\n\ncov = stats.Cov()\n\nfor xi, yi in zip(x, y):\n    print(cov.update(xi, yi).get())\n
    0.0\n-1.044999\n-4.286\n

    This class has a revert method, and can thus be wrapped by utils.Rolling:

    from river import utils\n\nx = [-2.1,  -1, 4.3, 1, -2.1,  -1, 4.3]\ny = [   3, 1.1, .12, 1,    3, 1.1, .12]\n\nrcov = utils.Rolling(stats.Cov(), window_size=3)\n\nfor xi, yi in zip(x, y):\n    print(rcov.update(xi, yi).get())\n
    0.0\n-1.045\n-4.286\n-1.382\n-4.589\n-1.415\n-4.286\n

    "},{"location":"api/stats/Cov/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert update

    Update and return the called instance.

    Parameters

    • x
    • y
    • w \u2014 defaults to 1.0

    update_many"},{"location":"api/stats/Cov/#notes","title":"Notes","text":"

    The outcomes of the incremental and parallel updates are consistent with numpy's batch processing when \\(\\text{ddof} \\le 1\\).
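
    A short sketch of that consistency, reusing the data from the example above; the two-array signature of update_many is an assumption:

    import numpy as np
    from river import stats

    x = np.array([-2.1, -1.0, 4.3])
    y = np.array([3.0, 1.1, 0.12])

    cov = stats.Cov()  # ddof=1, matching numpy's default
    cov.update_many(x, y)

    cov.get()           # -4.286...
    np.cov(x, y)[0, 1]  # same value, computed in batch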

    1. Wikipedia article on algorithms for calculating variance \u21a9

    2. Schubert, E. and Gertz, M., 2018, July. Numerically stable parallel computation of (co-) variance. In Proceedings of the 30th International Conference on Scientific and Statistical Database Management (pp. 1-12).\u00a0\u21a9

    "},{"location":"api/stats/EWMean/","title":"EWMean","text":"

    Exponentially weighted mean.

    "},{"location":"api/stats/EWMean/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.5

      The closer fading_factor is to 1, the more the statistic will adapt to recent values.

    "},{"location":"api/stats/EWMean/#attributes","title":"Attributes","text":"
    • mean (float)

      The running exponentially weighted mean.

    "},{"location":"api/stats/EWMean/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, 3, 5, 4, 6, 8, 7, 9, 11]\newm = stats.EWMean(fading_factor=0.5)\nfor x in X:\n    print(ewm.update(x).get())\n
    1.0\n2.0\n3.5\n3.75\n4.875\n6.4375\n6.71875\n7.859375\n9.4296875\n

    "},{"location":"api/stats/EWMean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    2. Exponential Moving Average on Streaming Data \u21a9

    "},{"location":"api/stats/EWVar/","title":"EWVar","text":"

    Exponentially weighted variance.

    To calculate the variance, we use the fact that Var(X) = Mean(x^2) - Mean(x)^2; internally, we maintain exponentially weighted means of both x and x^2.
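
    To make this identity concrete, here is a quick sketch that checks EWVar against two EWMean instances tracking x and x^2, assuming the same fading_factor is applied everywhere:

    import math
    from river import stats

    ewv = stats.EWVar(fading_factor=0.5)
    mean_x = stats.EWMean(fading_factor=0.5)
    mean_x2 = stats.EWMean(fading_factor=0.5)

    for x in [1, 3, 5, 4]:
        ewv = ewv.update(x)
        mean_x = mean_x.update(x)
        mean_x2 = mean_x2.update(x * x)
        # Var(X) = Mean(x^2) - Mean(x)^2 at every step
        assert math.isclose(ewv.get(), mean_x2.get() - mean_x.get() ** 2)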

    "},{"location":"api/stats/EWVar/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 0.5

      The closer fading_factor is to 1, the more the statistic will adapt to recent values.

    "},{"location":"api/stats/EWVar/#attributes","title":"Attributes","text":"
    • variance (float)

      The running exponentially weighted variance.

    "},{"location":"api/stats/EWVar/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, 3, 5, 4, 6, 8, 7, 9, 11]\newv = stats.EWVar(fading_factor=0.5)\nfor x in X:\n    print(ewv.update(x).get())\n
    0.0\n1.0\n2.75\n1.4375\n1.984375\n3.43359375\n1.7958984375\n2.198974609375\n3.56536865234375\n

    "},{"location":"api/stats/EWVar/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    2. Exponential Moving Average on Streaming Data \u21a9

    "},{"location":"api/stats/Entropy/","title":"Entropy","text":"

    Running entropy.

    "},{"location":"api/stats/Entropy/#parameters","title":"Parameters","text":"
    • fading_factor

      Default \u2192 1

      Fading factor.

    • eps

      Default \u2192 1e-08

      Small value that will be added to the denominator to avoid division by zero.

    "},{"location":"api/stats/Entropy/#attributes","title":"Attributes","text":"
    • entropy (float)

      The running entropy.

    • n (int)

      The current number of observations.

    • counter (collections.Counter)

      Counts the number of times each value has occurred.

    "},{"location":"api/stats/Entropy/#examples","title":"Examples","text":"

    import math\nimport random\nimport numpy as np\nfrom scipy.stats import entropy\nfrom river import stats\n\ndef entropy_list(labels, base=None):\n  value,counts = np.unique(labels, return_counts=True)\n  return entropy(counts, base=base)\n\nSEED = 42 * 1337\nrandom.seed(SEED)\n\nentro = stats.Entropy(fading_factor=1)\n\nlist_animal = []\nfor animal, num_val in zip(['cat', 'dog', 'bird'],[301, 401, 601]):\n    list_animal += [animal for i in range(num_val)]\nrandom.shuffle(list_animal)\n\nfor animal in list_animal:\n    _ = entro.update(animal)\n\nprint(f'{entro.get():.6f}')\n
    1.058093\n
    print(f'{entropy_list(list_animal):.6f}')\n
    1.058093\n

    "},{"location":"api/stats/Entropy/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Sovdat, B., 2014. Updating Formulas and Algorithms for Computing Entropy and Gini Index from Time-Changing Data Streams. arXiv preprint arXiv:1403.6348. \u21a9

    "},{"location":"api/stats/IQR/","title":"IQR","text":"

    Computes the interquartile range.

    "},{"location":"api/stats/IQR/#parameters","title":"Parameters","text":"
    • q_inf

      Default \u2192 0.25

      Desired inferior quantile, must be between 0 and 1. Defaults to 0.25.

    • q_sup

      Default \u2192 0.75

      Desired superior quantile, must be between 0 and 1. Defaults to 0.75.

    "},{"location":"api/stats/IQR/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/IQR/#examples","title":"Examples","text":"

    from river import stats\n\niqr = stats.IQR(q_inf=0.25, q_sup=0.75)\n\nfor i in range(0, 1001):\n    iqr = iqr.update(i)\n    if i % 100 == 0:\n        print(iqr.get())\n
    0.0\n50.0\n100.0\n150.0\n200.0\n250.0\n300.0\n350.0\n400.0\n450.0\n500.0\n

    "},{"location":"api/stats/IQR/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Kurtosis/","title":"Kurtosis","text":"

    Running kurtosis using Welford's algorithm.

    "},{"location":"api/stats/Kurtosis/#parameters","title":"Parameters","text":"
    • bias

      Default \u2192 False

      If False, then the calculations are corrected for statistical bias.

    "},{"location":"api/stats/Kurtosis/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Kurtosis/#examples","title":"Examples","text":"

    from river import stats\nimport scipy.stats\nimport numpy as np\n\nnp.random.seed(42)\nX = np.random.normal(loc=0, scale=1, size=10)\n\nkurtosis = stats.Kurtosis(bias=False)\nfor x in X:\n    print(kurtosis.update(x).get())\n
    -3.0\n-2.0\n-1.5\n1.4130027920707047\n0.15367976585756438\n0.46142633246812653\n-1.620647789230658\n-1.3540178492487054\n-1.2310268787102745\n-0.9490372374384453\n

    for i in range(2, len(X)+1):\n    print(scipy.stats.kurtosis(X[:i], bias=False))\n
    -2.0\n-1.4999999999999998\n1.4130027920707082\n0.15367976585756082\n0.46142633246812403\n-1.620647789230658\n-1.3540178492487063\n-1.2310268787102738\n-0.9490372374384459\n

    kurtosis = stats.Kurtosis(bias=True)\nfor x in X:\n    print(kurtosis.update(x).get())\n
    -3.0\n-2.0\n-1.5\n-1.011599627723906\n-0.9615800585356089\n-0.6989395431537853\n-1.4252699121794408\n-1.311437071070812\n-1.246289111322894\n-1.082283689864171\n

    for i in range(2, len(X)+1):\n    print(scipy.stats.kurtosis(X[:i], bias=True))\n
    -2.0\n-1.4999999999999998\n-1.0115996277239057\n-0.9615800585356098\n-0.6989395431537861\n-1.425269912179441\n-1.3114370710708125\n-1.2462891113228936\n-1.0822836898641714\n

    "},{"location":"api/stats/Kurtosis/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Link/","title":"Link","text":"

    A link joins two univariate statistics as a sequence.

    This can be used to pipe the output of one statistic into the input of another, for instance to calculate the mean of the variance of a variable. It can also be used to compute shifted statistics by piping statistics with an instance of stats.Shift.

    Note that a link is not meant to be instantiated via this class definition. Instead, users can link statistics together via the | operator.

    "},{"location":"api/stats/Link/#parameters","title":"Parameters","text":"
    • left

      Type \u2192 stats.base.Univariate

    • right

      Type \u2192 stats.base.Univariate

      The output from left's get method is passed to right's update method if left's get method doesn't produce None.

    "},{"location":"api/stats/Link/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Link/#examples","title":"Examples","text":"
    from river import stats\nstat = stats.Shift(1) | stats.Mean()\n

    No values have been seen, therefore get defaults to the initial value of stats.Mean, which is 0.

    stat.get()\n
    0.\n

    Let us now call update.

    stat = stat.update(1)\n

    The output from get will still be 0. The reason is that stats.Shift does not have enough values yet, and therefore outputs its default value, which is None. The stats.Mean instance is therefore not updated.

    stat.get()\n
    0.0\n

    On the next call to update, the stats.Shift instance has seen enough values, and therefore the mean can be updated. The mean is therefore equal to 1, because that's the only value from the past.

    stat = stat.update(3)\nstat.get()\n
    1.0\n

    On the subsequent call to update, the mean will be updated with the value 3.

    stat = stat.update(4)\nstat.get()\n
    2.0\n

    Note that composing statistics returns a new statistic with its own name.

    stat.name\n
    'mean_of_shift_1'\n

    "},{"location":"api/stats/Link/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/MAD/","title":"MAD","text":"

    Median Absolute Deviation (MAD).

    The median absolute deviation is the median of the absolute differences between each data point and the data's overall median. In an online setting, the median of the data is unknown beforehand. Therefore, both the median of the data and the median of the differences of the data with respect to the latter are updated online. To be precise, the median of the data is updated before the median of the differences. As a consequence, this online version of the MAD does not coincide exactly with its batch counterpart.

    "},{"location":"api/stats/MAD/#attributes","title":"Attributes","text":"
    • median (stats.Median)

      The median of the data.

    "},{"location":"api/stats/MAD/#examples","title":"Examples","text":"

    from river import stats\n\nX = [4, 2, 5, 3, 0, 4]\n\nmad = stats.MAD()\nfor x in X:\n    print(mad.update(x).get())\n
    0.0\n2.0\n1.0\n1.0\n1.0\n1.0\n
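
    To make the difference with the batch MAD concrete, here is a sketch (using only the standard library, not taken from the library's docstring) that recomputes the exact batch MAD on each growing prefix of the same data. Note how the second value (1.0) differs from the online estimate above (2.0), because the online median is updated before the deviations are measured:

    import statistics\n\nX = [4, 2, 5, 3, 0, 4]\nfor i in range(1, len(X) + 1):\n    med = statistics.median(X[:i])\n    print(statistics.median(abs(x - med) for x in X[:i]))\n
    0\n1.0\n1\n1.0\n1\n1.0\n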

    "},{"location":"api/stats/MAD/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Median absolute deviation article on Wikipedia \u21a9

    "},{"location":"api/stats/Max/","title":"Max","text":"

    Running max.

    "},{"location":"api/stats/Max/#attributes","title":"Attributes","text":"
    • max (float)

      The current max.

    "},{"location":"api/stats/Max/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 5, -6]\n_max = stats.Max()\nfor x in X:\n    print(_max.update(x).get())\n
    1\n1\n3\n3\n5\n5\n

    "},{"location":"api/stats/Max/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Mean/","title":"Mean","text":"

    Running mean.

    "},{"location":"api/stats/Mean/#attributes","title":"Attributes","text":"
    • n (float)

      The current sum of weights. If each passed weight was 1, then this is equal to the number of seen observations.

    "},{"location":"api/stats/Mean/#examples","title":"Examples","text":"

    from river import stats\n\nX = [-5, -3, -1, 1, 3, 5]\nmean = stats.Mean()\nfor x in X:\n    print(mean.update(x).get())\n
    -5.0\n-4.0\n-3.0\n-2.0\n-1.0\n0.0\n

    You can calculate a rolling average by wrapping the statistic in a utils.Rolling:

    from river import utils\n\nX = [1, 2, 3, 4, 5, 6]\nrmean = utils.Rolling(stats.Mean(), window_size=2)\n\nfor x in X:\n    print(rmean.update(x).get())\n
    1.0\n1.5\n2.5\n3.5\n4.5\n5.5\n

    "},{"location":"api/stats/Mean/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many
    1. West, D. H. D. (1979). Updating mean and variance estimates: An improved method. Communications of the ACM, 22(9), 532-535. \u21a9

    2. Finch, T., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42. \u21a9

    3. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1983. Algorithms for computing the sample variance: Analysis and recommendations. The American Statistician, 37(3), pp.242-247. \u21a9

    "},{"location":"api/stats/Min/","title":"Min","text":"

    Running min.

    "},{"location":"api/stats/Min/#attributes","title":"Attributes","text":"
    • min (float)

      The current min.
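
    "},{"location":"api/stats/Min/#examples","title":"Examples","text":"

    By analogy with the stats.Max example, here is a minimal usage sketch (illustrative, not taken from the library's docstring):

    from river import stats\n\nX = [1, -4, 3, -2, 5, -6]\n_min = stats.Min()\nfor x in X:\n    print(_min.update(x).get())\n
    1\n-4\n-4\n-4\n-4\n-6\n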

    "},{"location":"api/stats/Min/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Mode/","title":"Mode","text":"

    Running mode.

    The mode is simply the most common value. An approximate mode can be computed by restricting the number of unique values that are counted.

    "},{"location":"api/stats/Mode/#parameters","title":"Parameters","text":"
    • k

      Default \u2192 25

      Only the first k unique values will be included. If k equals -1, the exact mode is computed.

    "},{"location":"api/stats/Mode/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Mode/#examples","title":"Examples","text":"

    from river import stats\n\nX = ['sunny', 'cloudy', 'cloudy', 'rainy', 'rainy', 'rainy']\nmode = stats.Mode(k=2)\nfor x in X:\n    print(mode.update(x).get())\n
    sunny\nsunny\ncloudy\ncloudy\ncloudy\ncloudy\n

    mode = stats.Mode(k=-1)\nfor x in X:\n    print(mode.update(x).get())\n
    sunny\nsunny\ncloudy\ncloudy\ncloudy\nrainy\n

    "},{"location":"api/stats/Mode/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/NUnique/","title":"NUnique","text":"

    Approximate number of unique values counter.

    This is basically an implementation of the HyperLogLog algorithm. Adapted from hypy. The code is a bit too terse but it will do for now.

    "},{"location":"api/stats/NUnique/#parameters","title":"Parameters","text":"
    • error_rate

      Default \u2192 0.01

      Desired error rate. Memory usage is inversely proportional to this value.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Set the seed to produce identical results.

    "},{"location":"api/stats/NUnique/#attributes","title":"Attributes","text":"
    • n_bits (int)

    • n_buckets (int)

    • buckets (list)

    "},{"location":"api/stats/NUnique/#examples","title":"Examples","text":"

    import string\nfrom river import stats\n\nalphabet = string.ascii_lowercase\nn_unique = stats.NUnique(error_rate=0.2, seed=42)\n\nn_unique.update('a').get()\n
    1\n

    n_unique.update('b').get()\n
    2\n

    for letter in alphabet:\n    n_unique = n_unique.update(letter)\nn_unique.get()\n
    31\n

    Lowering the error_rate parameter will increase the precision.

    n_unique = stats.NUnique(error_rate=0.01, seed=42)\nfor letter in alphabet:\n    n_unique = n_unique.update(letter)\nn_unique.get()\n
    26\n

    "},{"location":"api/stats/NUnique/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. My favorite algorithm (and data structure): HyperLogLog \u21a9

    2. Flajolet, P., Fusy, \u00c9., Gandouet, O. and Meunier, F., 2007, June. Hyperloglog: the analysis of a near-optimal cardinality estimation algorithm. \u21a9

    "},{"location":"api/stats/PeakToPeak/","title":"PeakToPeak","text":"

    Running peak to peak (max - min).

    "},{"location":"api/stats/PeakToPeak/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/PeakToPeak/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 4]\nptp = stats.PeakToPeak()\nfor x in X:\n    print(ptp.update(x).get())\n
    0\n5\n7\n7\n7\n8\n

    "},{"location":"api/stats/PeakToPeak/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/PearsonCorr/","title":"PearsonCorr","text":"

    Online Pearson correlation.

    "},{"location":"api/stats/PearsonCorr/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom.

    "},{"location":"api/stats/PearsonCorr/#attributes","title":"Attributes","text":"
    • var_x (stats.Var)

      Running variance of x.

    • var_y (stats.Var)

      Running variance of y.

    • cov_xy (stats.Cov)

      Running covariance of x and y.

    "},{"location":"api/stats/PearsonCorr/#examples","title":"Examples","text":"

    from river import stats\n\nx = [0, 0, 0, 1, 1, 1, 1]\ny = [0, 1, 2, 3, 4, 5, 6]\n\npearson = stats.PearsonCorr()\n\nfor xi, yi in zip(x, y):\n    print(pearson.update(xi, yi).get())\n
    0\n0\n0\n0.774596\n0.866025\n0.878310\n0.866025\n
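
    As a sanity check, the final value matches numpy's batch Pearson correlation (an illustrative snippet, assuming numpy is installed):

    import numpy as np\n\nx = [0, 0, 0, 1, 1, 1, 1]\ny = [0, 1, 2, 3, 4, 5, 6]\nprint(f'{np.corrcoef(x, y)[0, 1]:.6f}')\n
    0.866025\n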

    You can also do this in a rolling fashion:

    from river import utils\n\nx = [0, 0, 0, 1, 1, 1, 1]\ny = [0, 1, 2, 3, 4, 5, 6]\n\npearson = utils.Rolling(stats.PearsonCorr(), window_size=4)\n\nfor xi, yi in zip(x, y):\n    print(pearson.update(xi, yi).get())\n
    0\n0\n0\n0.7745966692414834\n0.8944271909999159\n0.7745966692414832\n-4.712160915387242e-09\n

    "},{"location":"api/stats/PearsonCorr/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x
    • y

    "},{"location":"api/stats/Quantile/","title":"Quantile","text":"

    Running quantile.

    Uses the P\u00b2 algorithm, which is also known as the \"Piecewise-Parabolic quantile estimator\". The code is inspired by LiveStats' implementation[^2].

    "},{"location":"api/stats/Quantile/#parameters","title":"Parameters","text":"
    • q

      Type \u2192 float

      Default \u2192 0.5

    Determines which quantile to compute; must be between 0 and 1.

    "},{"location":"api/stats/Quantile/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Quantile/#examples","title":"Examples","text":"

    from river import stats\nimport numpy as np\n\nnp.random.seed(42 * 1337)\nmu, sigma = 0, 1\ns = np.random.normal(mu, sigma, 500)\n\nmedian = stats.Quantile(0.5)\nfor x in s:\n   _ = median.update(x)\nprint(f'The estimated value of the 50th (median) quantile is {median.get():.4f}')\n
    The estimated value of the 50th (median) quantile is -0.0275\n

    print(f'The real value of the 50th (median) quantile is {np.median(s):.4f}')\n
    The real value of the 50th (median) quantile is -0.0135\n

    percentile_17 = stats.Quantile(0.17)\nfor x in s:\n   _ = percentile_17.update(x)\nprint(f'The estimated value of the 17th quantile is {percentile_17.get():.4f}')\n
    The estimated value of the 17th quantile is -0.8652\n

    print(f'The real value of the 17th quantile is {np.percentile(s,17):.4f}')\n
    The real value of the 17th quantile is -0.9072\n

    "},{"location":"api/stats/Quantile/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Jain, R. and Chlamtac, I., 1985. The P\u00b2 algorithm for dynamic calculation of quantiles and histograms without storing observations. Communications of the ACM, 28(10), pp.1076-1085. \u21a9

    2. LiveStats \u21a9

    3. P\u00b2 quantile estimator: estimating the median without storing values \u21a9

    "},{"location":"api/stats/RollingAbsMax/","title":"RollingAbsMax","text":"

    Running absolute max over a window.

    "},{"location":"api/stats/RollingAbsMax/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingAbsMax/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingAbsMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_absmax = stats.RollingAbsMax(window_size=2)\nfor x in X:\n    print(rolling_absmax.update(x).get())\n
    1\n4\n4\n3\n2\n2\n

    "},{"location":"api/stats/RollingAbsMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingIQR/","title":"RollingIQR","text":"

    Computes the rolling interquartile range.

    "},{"location":"api/stats/RollingIQR/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the window.

    • q_inf

      Default \u2192 0.25

      Desired inferior quantile, must be between 0 and 1. Defaults to 0.25.

    • q_sup

      Default \u2192 0.75

      Desired superior quantile, must be between 0 and 1. Defaults to 0.75.

    "},{"location":"api/stats/RollingIQR/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingIQR/#examples","title":"Examples","text":"

    from river import stats\nrolling_iqr = stats.RollingIQR(\n    q_inf=0.25,\n    q_sup=0.75,\n    window_size=101\n)\n\nfor i in range(0, 1001):\n    rolling_iqr = rolling_iqr.update(i)\n    if i % 100 == 0:\n        print(rolling_iqr.get())\n
    0.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n50.0\n

    "},{"location":"api/stats/RollingIQR/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMax/","title":"RollingMax","text":"

    Running max over a window.

    "},{"location":"api/stats/RollingMax/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMax/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingMax/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_max = stats.RollingMax(window_size=2)\nfor x in X:\n    print(rolling_max.update(x).get())\n
    1\n1\n3\n3\n2\n2\n

    "},{"location":"api/stats/RollingMax/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMin/","title":"RollingMin","text":"

    Running min over a window.

    "},{"location":"api/stats/RollingMin/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMin/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingMin/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_min = stats.RollingMin(2)\nfor x in X:\n    print(rolling_min.update(x).get())\n
    1\n-4\n-4\n-2\n-2\n1\n

    "},{"location":"api/stats/RollingMin/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingMode/","title":"RollingMode","text":"

    Running mode over a window.

    The mode is the most common value.

    "},{"location":"api/stats/RollingMode/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingMode/#attributes","title":"Attributes","text":"
    • counts (collections.defaultdict)

      Value counts.

    "},{"location":"api/stats/RollingMode/#examples","title":"Examples","text":"

    from river import stats\n\nX = ['sunny', 'sunny', 'sunny', 'rainy', 'rainy', 'rainy', 'rainy']\nrolling_mode = stats.RollingMode(window_size=2)\nfor x in X:\n    print(rolling_mode.update(x).get())\n
    sunny\nsunny\nsunny\nsunny\nrainy\nrainy\nrainy\n

    rolling_mode = stats.RollingMode(window_size=5)\nfor x in X:\n    print(rolling_mode.update(x).get())\n
    sunny\nsunny\nsunny\nsunny\nsunny\nrainy\nrainy\n

    "},{"location":"api/stats/RollingMode/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingPeakToPeak/","title":"RollingPeakToPeak","text":"

    Running peak to peak (max - min) over a window.

    "},{"location":"api/stats/RollingPeakToPeak/#parameters","title":"Parameters","text":"
    • window_size

      Type \u2192 int

      Size of the rolling window.

    "},{"location":"api/stats/RollingPeakToPeak/#attributes","title":"Attributes","text":"
    • max (stats.RollingMax)

      The running rolling max.

    • min (stats.RollingMin)

      The running rolling min.

    "},{"location":"api/stats/RollingPeakToPeak/#examples","title":"Examples","text":"

    from river import stats\n\nX = [1, -4, 3, -2, 2, 1]\nptp = stats.RollingPeakToPeak(window_size=2)\nfor x in X:\n    print(ptp.update(x).get())\n
    0\n5\n7\n5\n4\n1\n

    "},{"location":"api/stats/RollingPeakToPeak/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/RollingQuantile/","title":"RollingQuantile","text":"

    Running quantile over a window.

    "},{"location":"api/stats/RollingQuantile/#parameters","title":"Parameters","text":"
    • q

      Type \u2192 float

    Determines which quantile to compute; must be between 0 and 1.

    • window_size

      Type \u2192 int

      Size of the window.

    "},{"location":"api/stats/RollingQuantile/#attributes","title":"Attributes","text":"
    • name

    • window_size

    "},{"location":"api/stats/RollingQuantile/#examples","title":"Examples","text":"

    from river import stats\n\nrolling_quantile = stats.RollingQuantile(\n    q=.5,\n    window_size=101,\n)\n\nfor i in range(1001):\n    rolling_quantile = rolling_quantile.update(i)\n    if i % 100 == 0:\n        print(rolling_quantile.get())\n
    0.0\n50.0\n150.0\n250.0\n350.0\n450.0\n550.0\n650.0\n750.0\n850.0\n950.0\n

    "},{"location":"api/stats/RollingQuantile/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Left sorted \u21a9

    "},{"location":"api/stats/SEM/","title":"SEM","text":"

    Running standard error of the mean using Welford's algorithm.

    "},{"location":"api/stats/SEM/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom. The divisor used in calculations is n - ddof, where n is the number of seen elements.

    "},{"location":"api/stats/SEM/#attributes","title":"Attributes","text":"
    • n (int)

      Number of observations.

    "},{"location":"api/stats/SEM/#examples","title":"Examples","text":"

    from river import stats\n\nX = [3, 5, 4, 7, 10, 12]\n\nsem = stats.SEM()\nfor x in X:\n    print(sem.update(x).get())\n
    0.0\n1.0\n0.577350\n0.853912\n1.240967\n1.447219\n

    from river import utils\n\nX = [1, 4, 2, -4, -8, 0]\n\nrolling_sem = utils.Rolling(stats.SEM(ddof=1), window_size=3)\nfor x in X:\n    print(rolling_sem.update(x).get())\n
    0.0\n1.5\n0.881917\n2.403700\n2.905932\n2.309401\n
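
    As a sanity check, the final value of the first example above can be compared with scipy's batch computation (an illustrative snippet, assuming scipy is installed):

    import scipy.stats\n\nX = [3, 5, 4, 7, 10, 12]\nprint(f'{scipy.stats.sem(X, ddof=1):.5f}')\n
    1.44722\n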

    "},{"location":"api/stats/SEM/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many
    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Shift/","title":"Shift","text":"

    Shifts a data stream by returning past values.

    This can be used to compute statistics over past data. For instance, if you're computing daily averages, then shifting by 7 will be equivalent to computing averages from a week ago.

    Shifting values is useful when you're calculating an average over a target value. Indeed, in this case it's important to shift the values in order not to introduce leakage. The recommended way to do this is to use feature_extraction.TargetAgg, which already takes care of shifting the target values once.

    "},{"location":"api/stats/Shift/#parameters","title":"Parameters","text":"
    • amount

      Default \u2192 1

    Shift amount. The get method returns the value at time t - amount, where t is the current moment.

    • fill_value

      Default \u2192 None

      This value will be returned by the get method if not enough values have been observed.

    "},{"location":"api/stats/Shift/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Shift/#examples","title":"Examples","text":"

    It is rare to have to use Shift by itself. A more common usage is to compose it with other statistics. This can be done via the | operator.

    from river import stats\n\nstat = stats.Shift(1) | stats.Mean()\n\nfor i in range(5):\n    stat = stat.update(i)\n    print(stat.get())\n
    0.0\n0.0\n0.5\n1.0\n1.5\n

    A common use case for Shift is computing statistics on shifted data. For instance, say you have a dataset which records the amount of sales for a set of shops. You might then have a shop field and a sales field. Let's say you want to look at the average amount of sales per shop. You can do this by using a feature_extraction.Agg. When you call transform_one, you expect it to return the average amount of sales, without including today's sales. You can do this by piping an instance of stats.Shift into an instance of stats.Mean.

    from river import feature_extraction\n\nagg = feature_extraction.Agg(\n    on='sales',\n    how=stats.Shift(1) | stats.Mean(),\n    by='shop'\n)\n

    Let's define a little example dataset.

    X = iter([\n    {'shop': 'Ikea', 'sales': 10},\n    {'shop': 'Ikea', 'sales': 15},\n    {'shop': 'Ikea', 'sales': 20}\n])\n

    Now let's call the learn_one method to update our feature extractor.

    x = next(X)\nagg = agg.learn_one(x)\n

    At this point, the average defaults to the initial value of stats.Mean, which is 0.

    agg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 0.0}\n

    We can now update our feature extractor with the next data point and check the output.

    agg = agg.learn_one(next(X))\nagg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 10.0}\n

    agg = agg.learn_one(next(X))\nagg.transform_one(x)\n
    {'sales_mean_of_shift_1_by_shop': 12.5}\n

    "},{"location":"api/stats/Shift/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Skew/","title":"Skew","text":"

    Running skew using Welford's algorithm.

    "},{"location":"api/stats/Skew/#parameters","title":"Parameters","text":"
    • bias

      Default \u2192 False

      If False, then the calculations are corrected for statistical bias.

    "},{"location":"api/stats/Skew/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/Skew/#examples","title":"Examples","text":"

    from river import stats\nimport numpy as np\n\nnp.random.seed(42)\nX = np.random.normal(loc=0, scale=1, size=10)\n\nskew = stats.Skew(bias=False)\nfor x in X:\n    print(skew.update(x).get())\n
    0.0\n0.0\n-1.4802398132849872\n0.5127437186677888\n0.7803466510704751\n1.056115628922055\n0.5057840774320389\n0.3478402420400934\n0.4536710660918704\n0.4123070197493227\n

    skew = stats.Skew(bias=True)\nfor x in X:\n    print(skew.update(x).get())\n
    0.0\n0.0\n-0.6043053732501439\n0.2960327239981376\n0.5234724473423674\n0.7712778043924866\n0.39022088752624845\n0.278892645224261\n0.37425953513864063\n0.3476878073823696\n

    "},{"location":"api/stats/Skew/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    1. Wikipedia article on algorithms for calculating variance \u21a9

    "},{"location":"api/stats/Sum/","title":"Sum","text":"

    Running sum.

    "},{"location":"api/stats/Sum/#attributes","title":"Attributes","text":"
    • sum (float)

      The running sum.

    "},{"location":"api/stats/Sum/#examples","title":"Examples","text":"

    from river import stats\n\nX = [-5, -3, -1, 1, 3, 5]\ntotal = stats.Sum()\nfor x in X:\n    print(total.update(x).get())\n
    -5.0\n-8.0\n-9.0\n-8.0\n-5.0\n0.0\n

    from river import utils\n\nX = [1, -4, 3, -2, 2, 1]\nrolling_sum = utils.Rolling(stats.Sum(), window_size=2)\nfor x in X:\n    print(rolling_sum.update(x).get())\n
    1.0\n-3.0\n-1.0\n1.0\n0.0\n3.0\n

    "},{"location":"api/stats/Sum/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stats/Var/","title":"Var","text":"

    Running variance using Welford's algorithm.

    "},{"location":"api/stats/Var/#parameters","title":"Parameters","text":"
    • ddof

      Default \u2192 1

      Delta Degrees of Freedom. The divisor used in calculations is n - ddof, where n represents the number of seen elements.

    "},{"location":"api/stats/Var/#attributes","title":"Attributes","text":"
    • mean

      It is necessary to calculate the mean of the data in order to calculate its variance.

    "},{"location":"api/stats/Var/#examples","title":"Examples","text":"

    from river import stats\n\nX = [3, 5, 4, 7, 10, 12]\n\nvar = stats.Var()\nfor x in X:\n    print(var.update(x).get())\n
    0.0\n2.0\n1.0\n2.916666\n7.7\n12.56666\n

    You can measure a rolling variance by using a utils.Rolling wrapper:

    from river import utils\n\nX = [1, 4, 2, -4, -8, 0]\nrvar = utils.Rolling(stats.Var(ddof=1), window_size=3)\nfor x in X:\n    print(rvar.update(x).get())\n
    0.0\n4.5\n2.333333\n17.333333\n25.333333\n16.0\n
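
    The final value of the first example agrees with numpy's batch computation (an illustrative check, assuming numpy is installed):

    import numpy as np\n\nprint(np.var([3, 5, 4, 7, 10, 12], ddof=1))\n
    12.566666666666666\n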

    "},{"location":"api/stats/Var/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    revert

    Revert and return the called instance.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'
    • w \u2014 defaults to 1.0

    update_many"},{"location":"api/stats/Var/#notes","title":"Notes","text":"

    The outcomes of the incremental and parallel updates are consistent with numpy's batch processing when \\(\\text{ddof} \\le 1\\).

    1. Wikipedia article on algorithms for calculating variance \u21a9

    2. Chan, T.F., Golub, G.H. and LeVeque, R.J., 1983. Algorithms for computing the sample variance: Analysis and recommendations. The American Statistician, 37(3), pp.242-247. \u21a9

    3. Schubert, E. and Gertz, M., 2018, July. Numerically stable parallel computation of (co-)variance. In Proceedings of the 30th International Conference on Scientific and Statistical Database Management (pp. 1-12).\u00a0\u21a9

    "},{"location":"api/stats/base/Bivariate/","title":"Bivariate","text":"

    A bivariate statistic measures a relationship between two variables.

    "},{"location":"api/stats/base/Bivariate/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x
    • y

    "},{"location":"api/stats/base/Univariate/","title":"Univariate","text":"

    A univariate statistic measures a property of a variable.

    "},{"location":"api/stats/base/Univariate/#attributes","title":"Attributes","text":"
    • name
    "},{"location":"api/stats/base/Univariate/#methods","title":"Methods","text":"get

    Return the current value of the statistic.

    update

    Update and return the called instance.

    Parameters

    • x \u2014 'numbers.Number'

    "},{"location":"api/stream/Cache/","title":"Cache","text":"

    Utility for caching iterables.

    This can be used to save a stream of data to the disk in order to iterate over it faster the following time. This can save time depending on the nature of the stream. The more processing happens in a stream, the more time will be saved. Even in the case where no processing is done apart from reading the data, the cache will save some time because it uses the pickle binary protocol. It can thus improve the speed in common cases such as reading from a CSV file.

    "},{"location":"api/stream/Cache/#parameters","title":"Parameters","text":"
    • directory

      Default \u2192 None

      The path where to store the pickled data streams. If not provided, then it will be automatically inferred whenever possible, if not an exception will be raised.

    "},{"location":"api/stream/Cache/#attributes","title":"Attributes","text":"
    • keys (set)

      The set of keys that are being cached.

    "},{"location":"api/stream/Cache/#examples","title":"Examples","text":"
    import time\nfrom river import datasets\nfrom river import stream\n\ndataset = datasets.Phishing()\ncache = stream.Cache()\n

    The cache can be used by wrapping it around an iterable. Because this is the first time we are iterating over the data, nothing is cached.

    tic = time.time()\nfor x, y in cache(dataset, key='phishing'):\n    pass\ntoc = time.time()\nprint(toc - tic)  # doctest: +SKIP\n
    0.012813\n

    If we do the same thing again, we can see the loop is now faster.

    tic = time.time()\nfor x, y in cache(dataset, key='phishing'):\n    pass\ntoc = time.time()\nprint(toc - tic)  # doctest: +SKIP\n
    0.001927\n

    We can see an overview of the cache. The first line indicates the location of the cache.

    cache  # doctest: +SKIP\n
    /tmp\nphishing - 125.2KiB\n

    Finally, we can clear the stream from the cache.

    cache.clear('phishing')\ncache  # doctest: +SKIP\n
    /tmp\n

    There is also a clear_all method to remove all the items in the cache.

    cache.clear_all()\n
    "},{"location":"api/stream/Cache/#methods","title":"Methods","text":"call

    Call self as a function.

    Parameters

    • stream
    • key \u2014 defaults to None

    clear

    Delete the cached stream associated with the given key.

    Parameters

    • key \u2014 'str'

    clear_all

    Delete all the cached streams.

    "},{"location":"api/stream/TwitchChatStream/","title":"TwitchChatStream","text":"

    Twitch chat stream client.

    This client gives access to a live stream of chat messages in Twitch channels using the IRC protocol. You need to have a Twitch account and receive an OAuth token from https://twitchapps.com/tmi/.

    "},{"location":"api/stream/TwitchChatStream/#parameters","title":"Parameters","text":"
    • nickname

      Type \u2192 str

      The nickname of your account.

    • token

      Type \u2192 str

      OAuth token which has been generated.

    • channels

      Type \u2192 list[str]

      A list of channel names like [\"asmongold\", \"shroud\"] you want to collect messages from.

    • buffer_size

      Type \u2192 int

      Default \u2192 2048

    Size of the buffer in bytes used for receiving responses from Twitch over IRC (default 2 kB).

    • timeout

      Type \u2192 int

      Default \u2192 60

    A timeout value in seconds for waiting for a response from Twitch (default 60 s). It can be useful if all requested channels are offline or the chat is not active enough.

    "},{"location":"api/stream/TwitchChatStream/#examples","title":"Examples","text":"

    The live stream is instantiated by passing your Twitch account nickname, OAuth token and list of channels. Other parameters are optional.

    from river import stream\n\ntwitch_chat = stream.TwitchChatStream(\n    nickname=\"twitch_user1\",\n    token=\"oauth:okrip6j6fjio8n5xpy2oum1lph4fbve\",\n    channels=[\"asmongold\", \"shroud\"]\n)\n

    The stream can be iterated over like this:

    for item in twitch_chat:\n    print(item)\n

    Here's a single stream item example:

    {\n    'dt': datetime.datetime(2022, 9, 14, 10, 33, 37, 989560),\n    'channel': 'asmongold',\n    'username': 'moojiejaa',\n    'msg': 'damn this chat mod are wild'\n}\n

    1. Twitch IRC doc \u21a9

    "},{"location":"api/stream/TwitterLiveStream/","title":"TwitterLiveStream","text":"

    Twitter API v2 live stream client.

    This client gives access to a live stream of Tweets. That is, Tweets that have just been published. This is different from stream.TwitterRecentStream, which also covers Tweets that have been published over recent days, and not necessarily in real-time.

    A list of filtering rules has to be provided. For instance, this allows focusing on a subset of topics and/or users.

    Note

    Using this requires having the requests package installed.

    "},{"location":"api/stream/TwitterLiveStream/#parameters","title":"Parameters","text":"
    • rules

      See the documentation[^2] for a comprehensive overview of filtering rules.

    • bearer_token

      A bearer token that is available in each account's developer portal.

    "},{"location":"api/stream/TwitterLiveStream/#examples","title":"Examples","text":"

    The live stream is instantiated by passing a list of filtering rules, as well as a bearer token. For instance, we can listen to all the breaking news Tweets from the BBC and CNN.

    from river import stream\n\ntweets = stream.TwitterLiveStream(\n    rules=[\"from:BBCBreaking\", \"from:cnnbrk\"],\n    bearer_token=\"<insert_bearer_token>\"\n)\n
    The stream can then be iterated over, possibly in an infinite loop. This will listen to the live feed of Tweets and produce a Tweet right after it's been published.

    import logging\nimport time\n\nimport requests\n\nwhile True:\n    try:\n        for tweet in tweets:\n            print(tweet)\n    except requests.exceptions.RequestException as e:\n        logging.warning(str(e))\n        time.sleep(10)\n

    Here's a Tweet example:

    {\n    'data': {\n        'author_id': '428333',\n        'created_at': '2022-08-26T12:59:48.000Z',\n        'id': '1563149212774445058',\n        'text': \"Ukraine's Zaporizhzhia nuclear power plant, which is currently held by Russian forces, has been reconnected to Ukraine's electricity grid, according to the country's nuclear operator https://t.co/xfylkBs4JR\"\n    },\n    'includes': {\n        'users': [\n            {\n                'created_at': '2007-01-02T01:48:14.000Z',\n                'id': '428333',\n                'name': 'CNN Breaking News',\n                'username': 'cnnbrk'\n            }\n        ]\n    },\n    'matching_rules': [{'id': '1563148866333151233', 'tag': 'from:cnnbrk'}]\n}\n

    1. Filtered stream introduction \u21a9

    2. Building rules for filtered stream \u21a9

    3. Stream Tweets in real-time \u21a9

    "},{"location":"api/stream/iter-arff/","title":"iter_arff","text":"

    Iterates over rows from an ARFF file.

    "},{"location":"api/stream/iter-arff/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target

      Type \u2192 str | list[str] | None

      Default \u2192 None

      Name(s) of the target field. If None, then the target field is ignored. If a list of names is passed, then a dictionary is returned instead of a single value.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    • sparse

      Default \u2192 False

      Whether the data is sparse or not.

    "},{"location":"api/stream/iter-arff/#examples","title":"Examples","text":"

    cars = '''\n@relation CarData\n@attribute make {Toyota, Honda, Ford, Chevrolet}\n@attribute model string\n@attribute year numeric\n@attribute price numeric\n@attribute mpg numeric\n@data\nToyota, Corolla, 2018, 15000, 30.5\nHonda, Civic, 2019, 16000, 32.2\nFord, Mustang, 2020, 25000, 25.0\nChevrolet, Malibu, 2017, 18000, 28.9\nToyota, Camry, 2019, 22000, 29.8\n'''\nwith open('cars.arff', mode='w') as f:\n    _ = f.write(cars)\n\nfrom river import stream\n\nfor x, y in stream.iter_arff('cars.arff', target='price'):\n    print(x, y)\n
    {'make': 'Toyota', 'model': ' Corolla', 'year': 2018.0, 'mpg': 30.5} 15000.0\n{'make': 'Honda', 'model': ' Civic', 'year': 2019.0, 'mpg': 32.2} 16000.0\n{'make': 'Ford', 'model': ' Mustang', 'year': 2020.0, 'mpg': 25.0} 25000.0\n{'make': 'Chevrolet', 'model': ' Malibu', 'year': 2017.0, 'mpg': 28.9} 18000.0\n{'make': 'Toyota', 'model': ' Camry', 'year': 2019.0, 'mpg': 29.8} 22000.0\n

    Finally, let's delete the example file.

    import os; os.remove('cars.arff')\n

    ARFF files support sparse data. Let's create a sparse ARFF file.

    sparse = '''\n% traindata\n@RELATION \"traindata: -C 6\"\n@ATTRIBUTE y0 {0, 1}\n@ATTRIBUTE y1 {0, 1}\n@ATTRIBUTE y2 {0, 1}\n@ATTRIBUTE y3 {0, 1}\n@ATTRIBUTE y4 {0, 1}\n@ATTRIBUTE y5 {0, 1}\n@ATTRIBUTE X0 NUMERIC\n@ATTRIBUTE X1 NUMERIC\n@ATTRIBUTE X2 NUMERIC\n@DATA\n{ 3 1,6 0.863382,8 0.820094 }\n{ 2 1,6 0.659761 }\n{ 0 1,3 1,6 0.437881,8 0.818882 }\n{ 2 1,6 0.676477,7 0.724635,8 0.755123 }\n'''\n\nwith open('sparse.arff', mode='w') as f:\n    _ = f.write(sparse)\n

    In addition, we'll specify that there are several target fields.

    arff_stream = stream.iter_arff(\n    'sparse.arff',\n    target=['y0', 'y1', 'y2', 'y3', 'y4', 'y5'],\n    sparse=True\n)\n\nfor x, y in arff_stream:\n    print(x)\n    print(y)\n
    {'X0': '0.863382', 'X2': '0.820094'}\n{'y0': 0, 'y1': 0, 'y2': 0, 'y3': '1', 'y4': 0, 'y5': 0}\n{'X0': '0.659761'}\n{'y0': 0, 'y1': 0, 'y2': '1', 'y3': 0, 'y4': 0, 'y5': 0}\n{'X0': '0.437881', 'X2': '0.818882'}\n{'y0': '1', 'y1': 0, 'y2': 0, 'y3': '1', 'y4': 0, 'y5': 0}\n{'X0': '0.676477', 'X1': '0.724635', 'X2': '0.755123'}\n{'y0': 0, 'y1': 0, 'y2': '1', 'y3': 0, 'y4': 0, 'y5': 0}\n

    This function can also deal with missing features in non-sparse data. These are indicated with a question mark.

    data = '''\n@relation giveMeLoan-weka.filters.unsupervised.attribute.Remove-R1\n@attribute RevolvingUtilizationOfUnsecuredLines numeric\n@attribute age numeric\n@attribute NumberOfTime30-59DaysPastDueNotWorse numeric\n@attribute DebtRatio numeric\n@attribute MonthlyIncome numeric\n@attribute NumberOfOpenCreditLinesAndLoans numeric\n@attribute NumberOfTimes90DaysLate numeric\n@attribute NumberRealEstateLoansOrLines numeric\n@attribute NumberOfTime60-89DaysPastDueNotWorse numeric\n@attribute NumberOfDependents numeric\n@attribute isFraud {0,1}\n@data\n0.213179,74,0,0.375607,3500,3,0,1,0,1,0\n0.305682,57,0,5710,?,8,0,3,0,0,0\n0.754464,39,0,0.20994,3500,8,0,0,0,0,0\n0.116951,27,0,46,?,2,0,0,0,0,0\n0.189169,57,0,0.606291,23684,9,0,4,0,2,0\n'''\n\nwith open('data.arff', mode='w') as f:\n    _ = f.write(data)\n\nfor x, y in stream.iter_arff('data.arff', target='isFraud'):\n    print(len(x))\n
    10\n9\n10\n9\n10\n

    1. ARFF format description from Weka \u21a9

    "},{"location":"api/stream/iter-array/","title":"iter_array","text":"

    Iterates over the rows from an array of features and an array of targets.

    This method is intended to work with numpy arrays, but should also work with Python lists.

    "},{"location":"api/stream/iter-array/#parameters","title":"Parameters","text":"
    • X

      Type \u2192 np.ndarray

      A 2D array of features. This can also be a 1D array of strings, which can be the case if you're working with text.

    • y

      Type \u2192 np.ndarray | None

      Default \u2192 None

      An optional array of targets.

    • feature_names

      Type \u2192 list[base.typing.FeatureName] | None

      Default \u2192 None

      An optional list of feature names. The features will be labeled with integers if no names are provided.

    • target_names

      Type \u2192 list[base.typing.FeatureName] | None

      Default \u2192 None

      An optional list of output names. The outputs will be labeled with integers if no names are provided. Only applies if there are multiple outputs, i.e. if y is a 2D array.

    • shuffle

      Type \u2192 bool

      Default \u2192 False

      Indicates whether or not to shuffle the input arrays before iterating over them.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for shuffling the data.

    "},{"location":"api/stream/iter-array/#examples","title":"Examples","text":"

    from river import stream\nimport numpy as np\n\nX = np.array([[1, 2, 3], [11, 12, 13]])\nY = np.array([True, False])\n\ndataset = stream.iter_array(\n    X, Y,\n    feature_names=['x1', 'x2', 'x3']\n)\nfor x, y in dataset:\n    print(x, y)\n
    {'x1': 1, 'x2': 2, 'x3': 3} True\n{'x1': 11, 'x2': 12, 'x3': 13} False\n

    This also works with an array of texts:

    X = [\"foo\", \"bar\"]\ndataset = stream.iter_array(\n    X, Y,\n    feature_names=['x1', 'x2', 'x3']\n)\nfor x, y in dataset:\n    print(x, y)\n
    foo True\nbar False\n

    "},{"location":"api/stream/iter-csv/","title":"iter_csv","text":"

    Iterates over rows from a CSV file.

    Reading CSV files can be quite slow. If, for whatever reason, you're going to loop through the same file multiple times, then we recommend that you use the stream.Cache utility, as sketched below.
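
    For instance, one might wrap the iterator like so (an illustrative sketch; tv_shows.csv refers to the file created in the examples below):

    from river import stream\n\ncache = stream.Cache()\nfor x, y in cache(stream.iter_csv('tv_shows.csv'), key='tv_shows'):\n    pass\n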

    "},{"location":"api/stream/iter-csv/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target

      Type \u2192 str | list[str] | None

      Default \u2192 None

      A single target column is assumed if a string is passed. A multiple output scenario is assumed if a list of strings is passed. A None value will be assigned to each y if this parameter is omitted.

    • converters

      Type \u2192 dict | None

      Default \u2192 None

      All values in the CSV are interpreted as strings by default. You can use this parameter to cast values to the desired type. This should be a dict mapping feature names to callables used to parse their associated values. Note that a callable may be a type, such as float and int.

    • parse_dates

      Type \u2192 dict | None

      Default \u2192 None

      A dict mapping feature names to a format passed to the datetime.datetime.strptime method.

    • drop

      Type \u2192 list[str] | None

      Default \u2192 None

      Fields to ignore.

    • drop_nones

      Default \u2192 False

      Whether or not to drop fields where the value is a None.

    • fraction

      Default \u2192 1.0

      Sampling fraction.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      If specified, the sampling will be deterministic.

    • field_size_limit

      Type \u2192 int | None

      Default \u2192 None

      If not None, this will be passed to the csv.field_size_limit function.

    • kwargs

      All other keyword arguments are passed to the underlying csv.DictReader.

    "},{"location":"api/stream/iter-csv/#examples","title":"Examples","text":"

    Although this function is designed to handle different kinds of inputs, the most common use case is to read a file on the disk. We'll first create a little CSV file to illustrate.

    tv_shows = '''name,year,rating\nPlanet Earth II,2016,9.5\nPlanet Earth,2006,9.4\nBand of Brothers,2001,9.4\nBreaking Bad,2008,9.4\nChernobyl,2019,9.4\n'''\nwith open('tv_shows.csv', mode='w') as f:\n    _ = f.write(tv_shows)\n

    We can now go through the rows one by one. We can use the converters parameter to cast the rating field value as a float. We can also convert the year to a datetime via the parse_dates parameter.

    from river import stream\n\nparams = {\n    'converters': {'rating': float},\n    'parse_dates': {'year': '%Y'}\n}\nfor x, y in stream.iter_csv('tv_shows.csv', **params):\n    print(x, y)\n
    {'name': 'Planet Earth II', 'year': datetime.datetime(2016, 1, 1, 0, 0), 'rating': 9.5} None\n{'name': 'Planet Earth', 'year': datetime.datetime(2006, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Band of Brothers', 'year': datetime.datetime(2001, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Breaking Bad', 'year': datetime.datetime(2008, 1, 1, 0, 0), 'rating': 9.4} None\n{'name': 'Chernobyl', 'year': datetime.datetime(2019, 1, 1, 0, 0), 'rating': 9.4} None\n

    The value of y is always None because we haven't provided a value for the target parameter. Here is an example where a target is provided:

    dataset = stream.iter_csv('tv_shows.csv', target='rating', **params)\nfor x, y in dataset:\n    print(x, y)\n
    {'name': 'Planet Earth II', 'year': datetime.datetime(2016, 1, 1, 0, 0)} 9.5\n{'name': 'Planet Earth', 'year': datetime.datetime(2006, 1, 1, 0, 0)} 9.4\n{'name': 'Band of Brothers', 'year': datetime.datetime(2001, 1, 1, 0, 0)} 9.4\n{'name': 'Breaking Bad', 'year': datetime.datetime(2008, 1, 1, 0, 0)} 9.4\n{'name': 'Chernobyl', 'year': datetime.datetime(2019, 1, 1, 0, 0)} 9.4\n

    Finally, let's delete the example file.

    import os; os.remove('tv_shows.csv')\n
    "},{"location":"api/stream/iter-libsvm/","title":"iter_libsvm","text":"

    Iterates over a dataset in LIBSVM format.

    The LIBSVM format is a popular way in the machine learning community to store sparse datasets. Only numerical feature values are supported. The feature names will be considered as strings.

    "},{"location":"api/stream/iter-libsvm/#parameters","title":"Parameters","text":"
    • filepath_or_buffer

      Type \u2192 str

      Either a string indicating the location of a file, or a buffer object that has a read method.

    • target_type

      Default \u2192 <class 'float'>

      The type of the target value.

    • compression

      Default \u2192 infer

      For on-the-fly decompression of on-disk data. If this is set to 'infer' and filepath_or_buffer is a path, then the decompression method is inferred for the following extensions: '.gz', '.zip'.

    "},{"location":"api/stream/iter-libsvm/#examples","title":"Examples","text":"

    import io\nfrom river import stream\n\ndata = io.StringIO('''+1 x:-134.26 y:0.2563\n1 x:-12 z:0.3\n-1 y:.25\n''')\n\nfor x, y in stream.iter_libsvm(data, target_type=int):\n    print(y, x)\n
    1 {'x': -134.26, 'y': 0.2563}\n1 {'x': -12.0, 'z': 0.3}\n-1 {'y': 0.25}\n

    1. LIBSVM documentation \u21a9

    "},{"location":"api/stream/iter-pandas/","title":"iter_pandas","text":"

    Iterates over the rows of a pandas.DataFrame.

    "},{"location":"api/stream/iter-pandas/#parameters","title":"Parameters","text":"
    • X

      Type \u2192 pd.DataFrame

      A dataframe of features.

    • y

      Type \u2192 pd.Series | pd.DataFrame | None

      Default \u2192 None

      A series or a dataframe with one column per target.

    • kwargs

      Extra keyword arguments are passed to the underlying call to stream.iter_array.

    "},{"location":"api/stream/iter-pandas/#examples","title":"Examples","text":"

    import pandas as pd\nfrom river import stream\n\nX = pd.DataFrame({\n    'x1': [1, 2, 3, 4],\n    'x2': ['blue', 'yellow', 'yellow', 'blue'],\n    'y': [True, False, False, True]\n})\ny = X.pop('y')\n\nfor xi, yi in stream.iter_pandas(X, y):\n    print(xi, yi)\n
    {'x1': 1, 'x2': 'blue'} True\n{'x1': 2, 'x2': 'yellow'} False\n{'x1': 3, 'x2': 'yellow'} False\n{'x1': 4, 'x2': 'blue'} True\n

    "},{"location":"api/stream/iter-sklearn-dataset/","title":"iter_sklearn_dataset","text":"

    Iterates rows from one of the datasets provided by scikit-learn.

    This allows you to use any dataset from scikit-learn's datasets module. For instance, you can use the fetch_openml function to get access to all of the datasets from the OpenML website.
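
    For instance, fetching a dataset from OpenML might look like this (an illustrative sketch; the dataset name is arbitrary and the call downloads data):

    from sklearn import datasets\nfrom river import stream\n\ndataset = datasets.fetch_openml('iris', version=1, as_frame=False)\nfor xi, yi in stream.iter_sklearn_dataset(dataset):\n    ...\n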

    "},{"location":"api/stream/iter-sklearn-dataset/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 sklearn.utils.Bunch

      A scikit-learn dataset.

    • kwargs

      Extra keyword arguments are passed to the underlying call to stream.iter_array.

    "},{"location":"api/stream/iter-sklearn-dataset/#examples","title":"Examples","text":"

    import pprint\nfrom sklearn import datasets\nfrom river import stream\n\ndataset = datasets.load_diabetes()\n\nfor xi, yi in stream.iter_sklearn_dataset(dataset):\n    pprint.pprint(xi)\n    print(yi)\n    break\n
    {'age': 0.038075906433423026,\n 'bmi': 0.061696206518683294,\n 'bp': 0.0218723855140367,\n 's1': -0.04422349842444599,\n 's2': -0.03482076283769895,\n 's3': -0.04340084565202491,\n 's4': -0.002592261998183278,\n 's5': 0.019907486170462722,\n 's6': -0.01764612515980379,\n 'sex': 0.05068011873981862}\n151.0\n

    "},{"location":"api/stream/iter-sql/","title":"iter_sql","text":"

    Iterates over the results from an SQL query.

    By default, SQLAlchemy prefetches results. Therefore, even though you can iterate over the resulting rows one by one, the results are in fact loaded in batch. You can modify this behavior by configuring the connection you pass to iter_sql. For instance, you can set the stream_results parameter to True, as explained in SQLAlchemy's documentation. Note, however, that this isn't available for all database engines.
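
    For instance, enabling server-side cursors might look like this (an illustrative sketch; the connection URL is hypothetical and the option only takes effect with drivers that support it):

    import sqlalchemy\nfrom river import stream\n\nengine = sqlalchemy.create_engine('postgresql://user:password@host/db')  # hypothetical DSN\nwith engine.connect().execution_options(stream_results=True) as conn:\n    for x, y in stream.iter_sql('SELECT * FROM sales', conn, target_name='amount'):\n        ...\n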

    "},{"location":"api/stream/iter-sql/#parameters","title":"Parameters","text":"
    • query

      Type \u2192 str | sqlalchemy.TextClause | sqlalchemy.Select

      SQL query to be executed.

    • conn

      Type \u2192 sqlalchemy.Connection

      An SQLAlchemy construct which has an execute method. In other words you can pass an engine, a connection, or a session.

    • target_name

      Type \u2192 str | None

      Default \u2192 None

      The name of the target field. If this is None, then y will also be None.

    "},{"location":"api/stream/iter-sql/#examples","title":"Examples","text":"

    As an example we'll create an in-memory database with SQLAlchemy.

    import datetime as dt\nimport sqlalchemy\n\nengine = sqlalchemy.create_engine('sqlite://')\n\nmetadata = sqlalchemy.MetaData()\n\nt_sales = sqlalchemy.Table('sales', metadata,\n    sqlalchemy.Column('shop', sqlalchemy.String, primary_key=True),\n    sqlalchemy.Column('date', sqlalchemy.Date, primary_key=True),\n    sqlalchemy.Column('amount', sqlalchemy.Integer)\n)\n\nmetadata.create_all(engine)\n\nsales = [\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 2), 'amount': 20},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 2), 'amount': 18},\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 3), 'amount': 22},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 3), 'amount': 14},\n    {'shop': 'Hema', 'date': dt.date(2016, 8, 4), 'amount': 12},\n    {'shop': 'Ikea', 'date': dt.date(2016, 8, 4), 'amount': 16}\n]\n\nwith engine.connect() as conn:\n    _ = conn.execute(t_sales.insert(), sales)\n    conn.commit()\n

    We can now query the database. We will set amount to be the target field.

    from river import stream\n\nwith engine.connect() as conn:\n    query = sqlalchemy.sql.select(t_sales)\n    dataset = stream.iter_sql(query, conn, target_name='amount')\n    for x, y in dataset:\n        print(x, y)\n
    {'shop': 'Hema', 'date': datetime.date(2016, 8, 2)} 20\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 2)} 18\n{'shop': 'Hema', 'date': datetime.date(2016, 8, 3)} 22\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 3)} 14\n{'shop': 'Hema', 'date': datetime.date(2016, 8, 4)} 12\n{'shop': 'Ikea', 'date': datetime.date(2016, 8, 4)} 16\n

    This also works with raw SQL queries.

    with engine.connect() as conn:\n    query = \"SELECT * FROM sales WHERE shop = 'Hema'\"\n    dataset = stream.iter_sql(query, conn, target_name='amount')\n    for x, y in dataset:\n        print(x, y)\n
    {'shop': 'Hema', 'date': '2016-08-02'} 20\n{'shop': 'Hema', 'date': '2016-08-03'} 22\n{'shop': 'Hema', 'date': '2016-08-04'} 12\n

    "},{"location":"api/stream/shuffle/","title":"shuffle","text":"

    Shuffles a stream of data.

    This works by maintaining a buffer of elements. The first buffer_size elements are stored in memory. Once the buffer is full, a random element inside the buffer is yielded. Every time an element is yielded, the next element in the stream replaces it and the buffer is sampled again. Increasing buffer_size will improve the quality of the shuffling.

    If you really want to stream over your dataset in a "good" random order, the best way is to split your dataset into smaller datasets and loop over them in a round-robin fashion. You may do this by using the roundrobin recipe from the itertools documentation, which is reproduced below.
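
    For reference, here is the roundrobin recipe from the itertools documentation (reproduced as a sketch):

    from itertools import cycle, islice\n\ndef roundrobin(*iterables):\n    \"roundrobin('ABC', 'D', 'EF') --> A D E B F C\"\n    num_active = len(iterables)\n    nexts = cycle(iter(it).__next__ for it in iterables)\n    while num_active:\n        try:\n            for next_ in nexts:\n                yield next_()\n        except StopIteration:\n            # Remove the exhausted iterator from the cycle.\n            num_active -= 1\n            nexts = cycle(islice(nexts, num_active))\n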

    "},{"location":"api/stream/shuffle/#parameters","title":"Parameters","text":"
    • stream

      Type \u2192 typing.Iterator

      The stream to shuffle.

    • buffer_size

      Type \u2192 int

    The size of the buffer which contains the elements held in memory. Increasing this will increase randomness but will incur more memory usage.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed used for sampling.

    "},{"location":"api/stream/shuffle/#examples","title":"Examples","text":"

    from river import stream\n\nfor i in stream.shuffle(range(15), buffer_size=5, seed=42):\n    print(i)\n
    0\n5\n2\n1\n8\n9\n6\n4\n11\n12\n10\n7\n14\n13\n3\n

    1. Visualizing TensorFlow's streaming shufflers \u21a9

    "},{"location":"api/stream/simulate-qa/","title":"simulate_qa","text":"

    Simulate a time-ordered question and answer session.

    This method allows looping through a dataset in the order in which it arrived. Indeed, it usually is the case that labels arrive after features. Being able to go through a dataset in arrival order enables assessing a model's performance in a reliable manner. For instance, the evaluate.progressive_val_score is a high-level method that can be used to score a model on a dataset. Under the hood it uses this method to determine the correct arrival order.

    "},{"location":"api/stream/simulate-qa/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A stream of (features, target) tuples.

    • moment

      Type \u2192 str | typing.Callable[[dict], dt.datetime] | None

      The attribute used for measuring time. If a callable is passed, then it is expected to take as input a dict of features. If None, then the observations are implicitly timestamped in the order in which they arrive. If a str is passed, then it will be used to obtain the time from the input features.

    • delay

      Type \u2192 str | int | dt.timedelta | typing.Callable | None

      The amount of time to wait before revealing the target associated with each observation to the model. This value is expected to be able to sum with the moment value. For instance, if moment is a datetime.date, then delay is expected to be a datetime.timedelta. If a callable is passed, then it is expected to take as input a dict of features and the target. If a str is passed, then it will be used to access the relevant field from the features. If None is passed, then no delay will be used, which leads to doing standard online validation. If a scalar is passed, such an int or a datetime.timedelta, then the delay is constant.

    • copy

      Type \u2192 bool

      Default \u2192 True

    If True, then a separate copy of the features is yielded the second time around. This ensures that inadvertent modifications in downstream code don't have any effect.

    "},{"location":"api/stream/simulate-qa/#examples","title":"Examples","text":"

    The arrival delay isn't usually indicated in a dataset, but it can sometimes be inferred from the features. As an example, we'll simulate the departure and arrival times of taxi trips. Let's first create a time table which records the departure time and the duration in seconds of several taxi trips.

    import datetime as dt\ntime_table = [\n    (dt.datetime(2020, 1, 1, 20,  0, 0),  900),\n    (dt.datetime(2020, 1, 1, 20, 10, 0), 1800),\n    (dt.datetime(2020, 1, 1, 20, 20, 0),  300),\n    (dt.datetime(2020, 1, 1, 20, 45, 0),  400),\n    (dt.datetime(2020, 1, 1, 20, 50, 0),  240),\n    (dt.datetime(2020, 1, 1, 20, 55, 0),  450)\n]\n

    We can now create a streaming dataset where the features are the departure dates and the targets are the durations.

    dataset = (\n    ({'date': date}, duration)\n    for date, duration in time_table\n)\n

    Now, we can use simulate_qa to iterate over the events in the order in which they are meant to occur.

    delay = lambda _, y: dt.timedelta(seconds=y)\n\nfor i, x, y in simulate_qa(dataset, moment='date', delay=delay):\n    if y is None:\n        print(f'{x[\"date\"]} - trip #{i} departs')\n    else:\n        arrival_date = x['date'] + dt.timedelta(seconds=y)\n        print(f'{arrival_date} - trip #{i} arrives after {y} seconds')\n
    2020-01-01 20:00:00 - trip #0 departs\n2020-01-01 20:10:00 - trip #1 departs\n2020-01-01 20:15:00 - trip #0 arrives after 900 seconds\n2020-01-01 20:20:00 - trip #2 departs\n2020-01-01 20:25:00 - trip #2 arrives after 300 seconds\n2020-01-01 20:40:00 - trip #1 arrives after 1800 seconds\n2020-01-01 20:45:00 - trip #3 departs\n2020-01-01 20:50:00 - trip #4 departs\n2020-01-01 20:51:40 - trip #3 arrives after 400 seconds\n2020-01-01 20:54:00 - trip #4 arrives after 240 seconds\n2020-01-01 20:55:00 - trip #5 departs\n2020-01-01 21:02:30 - trip #5 arrives after 450 seconds\n

This function is extremely practical because it provides a reliable way to evaluate the performance of a model in a real scenario. Indeed, it allows making predictions and performing model updates in exactly the same manner as would happen live. For instance, it is used in evaluate.progressive_val_score, which is a higher-level function for evaluating models in an online manner.
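
    To make the pattern concrete, here is a minimal sketch of doing this by hand on the taxi-trip stream above. It is illustrative rather than canonical: the linear model and the ordinal-date feature are placeholder choices, and the dataset generator and delay function are recreated from the previous example.

    from river import linear_model, metrics, stream

    # Hedged sketch of manual progressive validation with simulate_qa, reusing
    # the time_table and dt imports from the example above.
    dataset = (
        ({'date': date}, duration)
        for date, duration in time_table
    )
    delay = lambda _, y: dt.timedelta(seconds=y)

    model = linear_model.LinearRegression()
    metric = metrics.MAE()
    answers = {}

    for i, x, y in stream.simulate_qa(dataset, moment='date', delay=delay):
        features = {'ordinal_date': x['date'].toordinal()}  # make the date numeric
        if y is None:
            # The question arrives: store a prediction made with the current model.
            answers[i] = model.predict_one(features)
        else:
            # The answer arrives: score the earlier prediction, then learn.
            metric.update(y_true=y, y_pred=answers.pop(i))
            model.learn_one(features, y)

    print(metric)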

    "},{"location":"api/time-series/ForecastingMetric/","title":"ForecastingMetric","text":""},{"location":"api/time-series/ForecastingMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/HoltWinters/","title":"HoltWinters","text":"

    Holt-Winters forecaster.

    This is a standard implementation of the Holt-Winters forecasting method. Certain parametrisations result in special cases, such as simple exponential smoothing.
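
    For instance, here is a sketch of two such special cases, using only the parameters documented below (beta and gamma default to None, which disables the trend and seasonal components):

    from river import time_series

    # Simple exponential smoothing: level only (no trend, no seasonality).
    ses = time_series.HoltWinters(alpha=0.3)

    # Holt's linear trend method: level and trend, still no seasonality.
    holt = time_series.HoltWinters(alpha=0.3, beta=0.1)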

Optimal parameters and initialisation values can be determined in a batch setting. However, in an online setting, one has to wait until enough values have been observed. The first k = max(2, seasonality) values are used to initialize the components.

    Level initialization

    \\[l = \\frac{1}{k} \\sum_{i=1}{k} y_i\\]

    Trend initialization

    \\[t = \\frac{1}{k - 1} \\sum_{i=2}{k} y_i - y_{i-1}\\]

Seasonality initialization

    \\[s_i = \\frac{y_i}{k}\\]"},{"location":"api/time-series/HoltWinters/#parameters","title":"Parameters","text":"
    • alpha

      Smoothing parameter for the level.

    • beta

      Default \u2192 None

      Smoothing parameter for the trend.

    • gamma

      Default \u2192 None

      Smoothing parameter for the seasonality.

    • seasonality

      Default \u2192 0

The number of periods in a season. For instance, this should be 4 for quarterly data, and 12 for monthly data.

    • multiplicative

      Default \u2192 False

      Whether or not to use a multiplicative formulation.

    "},{"location":"api/time-series/HoltWinters/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import metrics\nfrom river import time_series\n\ndataset = datasets.AirlinePassengers()\n\nmodel = time_series.HoltWinters(\n    alpha=0.3,\n    beta=0.1,\n    gamma=0.6,\n    seasonality=12,\n    multiplicative=True\n)\n\nmetric = metrics.MAE()\n\ntime_series.evaluate(\n    dataset,\n    model,\n    metric,\n    horizon=12\n)\n
    +1  MAE: 25.899087\n+2  MAE: 26.26131\n+3  MAE: 25.735903\n+4  MAE: 25.625678\n+5  MAE: 26.093842\n+6  MAE: 26.90249\n+7  MAE: 28.634398\n+8  MAE: 29.284769\n+9  MAE: 31.018351\n+10 MAE: 32.252349\n+11 MAE: 33.518946\n+12 MAE: 33.975057\n

    "},{"location":"api/time-series/HoltWinters/#methods","title":"Methods","text":"forecast

Makes a forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. Exponential smoothing \u2014 Wikipedia \u21a9

    2. Exponential smoothing \u2014 Forecasting: Principles and Practice \u21a9

    3. What is Exponential Smoothing? \u2014 Engineering statistics handbook \u21a9

    "},{"location":"api/time-series/HorizonAggMetric/","title":"HorizonAggMetric","text":"

Same as HorizonMetric, but aggregates the result with a provided function.

This makes it possible, for instance, to measure the average performance of a forecasting model along the horizon.

    "},{"location":"api/time-series/HorizonAggMetric/#parameters","title":"Parameters","text":"
    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float]

A function that takes as input a list of floats and outputs a single float. Typical choices are min and max, as well as statistics.mean and statistics.median.

    "},{"location":"api/time-series/HorizonAggMetric/#examples","title":"Examples","text":"

    This is used internally by the time_series.evaluate function when you pass an agg_func.

    import statistics\nfrom river import datasets\nfrom river import metrics\nfrom river import time_series\n\nmetric = time_series.evaluate(\n    dataset=datasets.AirlinePassengers(),\n    model=time_series.HoltWinters(alpha=0.1),\n    metric=metrics.MAE(),\n    agg_func=statistics.mean,\n    horizon=4\n)\n\nmetric\n
    mean(MAE): 42.901748\n

    "},{"location":"api/time-series/HorizonAggMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/HorizonMetric/","title":"HorizonMetric","text":"

    Measures performance at each time step ahead.

This makes it possible to measure the performance of a model at each time step along the horizon. A copy of the provided regression metric is made for each time step. At each time step ahead, the metric is thus evaluated on the predictions for that time step only, and not on those for the time steps before or after it.

    "},{"location":"api/time-series/HorizonMetric/#parameters","title":"Parameters","text":"
    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    "},{"location":"api/time-series/HorizonMetric/#examples","title":"Examples","text":"

    This is used internally by the time_series.evaluate function.

    from river import datasets\nfrom river import metrics\nfrom river import time_series\n\nmetric = time_series.evaluate(\n    dataset=datasets.AirlinePassengers(),\n    model=time_series.HoltWinters(alpha=0.1),\n    metric=metrics.MAE(),\n    horizon=4\n)\n\nmetric\n
    +1 MAE: 40.931286\n+2 MAE: 42.667998\n+3 MAE: 44.158092\n+4 MAE: 43.849617\n
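
    The metric can also be driven by hand. The following sketch (with made-up values) assumes the constructor takes the regression metric as its only argument, as per the parameters above:

    from river import metrics, time_series

    horizon_metric = time_series.HorizonMetric(metrics.MAE())

    # One update: truths and forecasts for the steps t + 1 and t + 2.
    horizon_metric.update(y_true=[10.0, 12.0], y_pred=[11.0, 14.0])

    print(horizon_metric.get())  # [1.0, 2.0] -- one MAE per step ahead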

    "},{"location":"api/time-series/HorizonMetric/#methods","title":"Methods","text":"get

    Return the current performance along the horizon.

    Returns

    list[float]: The current performance.

    update

    Update the metric at each step along the horizon.

    Parameters

    • y_true \u2014 'list[Number]'
    • y_pred \u2014 'list[Number]'

    Returns

    ForecastingMetric: self

    "},{"location":"api/time-series/SNARIMAX/","title":"SNARIMAX","text":"

    SNARIMAX model.

    SNARIMAX stands for (S)easonal (N)on-linear (A)uto(R)egressive (I)ntegrated (M)oving-(A)verage with e(X)ogenous inputs model.

    This model generalizes many established time series models in a single interface that can be trained online. It assumes that the provided training data is ordered in time and is uniformly spaced. It is made up of the following components:

• S (Seasonal)

    • N (Non-linear): Any online regression model can be used, not necessarily a linear regression as is done in textbooks.

    • AR (Autoregressive): Lags of the target variable are used as features.

    • I (Integrated): The model can be fitted on a differenced version of a time series. In this context, integration is the reverse of differencing.

    • MA (Moving average): Lags of the errors are used as features.

    • X (Exogenous): Users can provide additional features. Care has to be taken to include features that will be available both at training and prediction time.

    Each of these components can be switched on and off by specifying the appropriate parameters. Classical time series models such as AR, MA, ARMA, and ARIMA can thus be seen as special parametrizations of the SNARIMAX model.
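
    As an illustration, here is a sketch of how a few classical models map onto SNARIMAX parametrizations; the parameter meanings are documented below.

    from river import time_series

    ar2 = time_series.SNARIMAX(p=2, d=0, q=0)        # AR(2)
    ma1 = time_series.SNARIMAX(p=0, d=0, q=1)        # MA(1)
    arma11 = time_series.SNARIMAX(p=1, d=0, q=1)     # ARMA(1, 1)
    arima111 = time_series.SNARIMAX(p=1, d=1, q=1)   # ARIMA(1, 1, 1)

    # A seasonal variant with a 12-step season.
    sarima = time_series.SNARIMAX(p=1, d=0, q=0, m=12, sp=1, sd=1, sq=0)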

This model is tailored for time series that are homoskedastic. In other words, it might not work well if the variance of the time series varies widely over time.

    "},{"location":"api/time-series/SNARIMAX/#parameters","title":"Parameters","text":"
    • p

      Type \u2192 int

      Order of the autoregressive part. This is the number of past target values that will be included as features.

    • d

      Type \u2192 int

      Differencing order.

    • q

      Type \u2192 int

      Order of the moving average part. This is the number of past error terms that will be included as features.

    • m

      Type \u2192 int

      Default \u2192 1

Season length used for extracting seasonal features. If you believe your data has a seasonal pattern, then set this accordingly. For instance, if the data seems to exhibit a yearly seasonality, and your data is spaced by month, then you should set this to 12. Note that for this parameter to have any impact you should also set at least one of the p, d, and q parameters.

    • sp

      Type \u2192 int

      Default \u2192 0

      Seasonal order of the autoregressive part. This is the number of past target values that will be included as features.

    • sd

      Type \u2192 int

      Default \u2192 0

      Seasonal differencing order.

    • sq

      Type \u2192 int

      Default \u2192 0

      Seasonal order of the moving average part. This is the number of past error terms that will be included as features.

    • regressor

      Type \u2192 base.Regressor | None

      Default \u2192 None

      The online regression model to use. By default, a preprocessing.StandardScaler piped with a linear_model.LinearRegression will be used.

    "},{"location":"api/time-series/SNARIMAX/#attributes","title":"Attributes","text":"
    • differencer (Differencer)

    • y_trues (collections.deque)

      The p past target values.

    • errors (collections.deque)

      The q past error values.

    "},{"location":"api/time-series/SNARIMAX/#examples","title":"Examples","text":"

    import datetime as dt\nfrom river import datasets\nfrom river import time_series\nfrom river import utils\n\nperiod = 12\nmodel = time_series.SNARIMAX(\n    p=period,\n    d=1,\n    q=period,\n    m=period,\n    sd=1\n)\n\nfor t, (x, y) in enumerate(datasets.AirlinePassengers()):\n    model = model.learn_one(y)\n\nhorizon = 12\nfuture = [\n    {'month': dt.date(year=1961, month=m, day=1)}\n    for m in range(1, horizon + 1)\n]\nforecast = model.forecast(horizon=horizon)\nfor x, y_pred in zip(future, forecast):\n    print(x['month'], f'{y_pred:.3f}')\n
    1961-01-01 494.542\n1961-02-01 450.825\n1961-03-01 484.972\n1961-04-01 576.401\n1961-05-01 559.489\n1961-06-01 612.251\n1961-07-01 722.410\n1961-08-01 674.604\n1961-09-01 575.716\n1961-10-01 562.808\n1961-11-01 477.049\n1961-12-01 515.191\n

Classic ARIMA models learn solely from the time series values. You can also include features built at each step.

    import calendar\nimport math\nfrom river import compose\nfrom river import linear_model\nfrom river import optim\nfrom river import preprocessing\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}\n\nextract_features = compose.TransformerUnion(\n    get_ordinal_date,\n    get_month_distances\n)\n\nmodel = (\n    extract_features |\n    time_series.SNARIMAX(\n        p=1,\n        d=0,\n        q=0,\n        m=12,\n        sp=3,\n        sq=6,\n        regressor=(\n            preprocessing.StandardScaler() |\n            linear_model.LinearRegression(\n                intercept_init=110,\n                optimizer=optim.SGD(0.01),\n                intercept_lr=0.3\n            )\n        )\n    )\n)\n\nfor x, y in datasets.AirlinePassengers():\n    model = model.learn_one(x, y)\n\nforecast = model.forecast(horizon=horizon)\nfor x, y_pred in zip(future, forecast):\n    print(x['month'], f'{y_pred:.3f}')\n
    1961-01-01 444.821\n1961-02-01 432.612\n1961-03-01 457.739\n1961-04-01 465.544\n1961-05-01 476.575\n1961-06-01 516.255\n1961-07-01 565.405\n1961-08-01 572.470\n1961-09-01 512.645\n1961-10-01 475.919\n1961-11-01 438.033\n1961-12-01 456.892\n

    "},{"location":"api/time-series/SNARIMAX/#methods","title":"Methods","text":"forecast

Makes a forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None

    1. ARMA - Wikipedia \u21a9

    2. NARX - Wikipedia \u21a9

    3. ARIMA - Forecasting: Principles and Practice \u21a9

    4. Anava, O., Hazan, E., Mannor, S. and Shamir, O., 2013, June. Online learning for time series prediction. In Conference on learning theory (pp. 172-184) \u21a9

    "},{"location":"api/time-series/evaluate/","title":"evaluate","text":"

    Evaluates the performance of a forecaster on a time series dataset.

To understand why this method is useful, it's important to understand the difference between nowcasting and forecasting. Nowcasting is about predicting the value at the next time step, which can be seen as a special case of regression. In that case, the evaluate.progressive_val_score function may be used to evaluate a model via progressive validation.

    Forecasting models can also be evaluated via progressive validation. This is the purpose of this function. At each time step t, the forecaster is asked to predict the values at t + 1, t + 2, ..., t + horizon. The performance at each time step is measured and returned.
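
    A minimal usage sketch, mirroring the examples on the HorizonMetric and HorizonAggMetric pages (which call this function internally):

    from river import datasets, metrics, time_series

    metric = time_series.evaluate(
        dataset=datasets.AirlinePassengers(),
        model=time_series.HoltWinters(alpha=0.1),
        metric=metrics.MAE(),
        horizon=4,
    )

    print(metric)  # one MAE per forecasting step, from +1 to +4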

    "},{"location":"api/time-series/evaluate/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A sequential time series.

    • model

      Type \u2192 time_series.base.Forecaster

      A forecaster.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • horizon

      Type \u2192 int

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float] | None

      Default \u2192 None

    • grace_period

      Type \u2192 int | None

      Default \u2192 None

      Initial period during which the metric is not updated. This is to fairly evaluate models which need a warming up period to start producing meaningful forecasts. The value of this parameter is equal to the horizon by default.

    "},{"location":"api/time-series/iter-evaluate/","title":"iter_evaluate","text":"

    Evaluates the performance of a forecaster on a time series dataset and yields results.

This does exactly the same as time_series.evaluate. The only difference is that this function returns an iterator, yielding results at every step. This can be useful if you want control over what you do with the results. For instance, you might want to plot them.
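
    A hedged sketch of the call, using the same parameters as time_series.evaluate (documented below); the exact structure of each yielded item is not specified on this page, so it is left opaque here:

    from river import datasets, metrics, time_series

    steps = time_series.iter_evaluate(
        dataset=datasets.AirlinePassengers(),
        model=time_series.HoltWinters(alpha=0.1),
        metric=metrics.MAE(),
        horizon=4,
    )

    for step in steps:
        ...  # e.g. collect intermediate results for plotting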

    "},{"location":"api/time-series/iter-evaluate/#parameters","title":"Parameters","text":"
    • dataset

      Type \u2192 base.typing.Dataset

      A sequential time series.

    • model

      Type \u2192 time_series.base.Forecaster

      A forecaster.

    • metric

      Type \u2192 metrics.base.RegressionMetric

      A regression metric.

    • horizon

      Type \u2192 int

    • agg_func

      Type \u2192 typing.Callable[[list[float]], float] | None

      Default \u2192 None

    • grace_period

      Type \u2192 int | None

      Default \u2192 None

      Initial period during which the metric is not updated. This is to fairly evaluate models which need a warming up period to start producing meaningful forecasts. The value of this parameter is equal to the horizon by default.

    "},{"location":"api/time-series/base/Forecaster/","title":"Forecaster","text":""},{"location":"api/time-series/base/Forecaster/#methods","title":"Methods","text":"forecast

Makes a forecast at each step of the given horizon.

    Parameters

    • horizon \u2014 'int'
    • xs \u2014 'list[dict] | None' \u2014 defaults to None

    learn_one

    Updates the model.

    Parameters

    • y \u2014 'float'
    • x \u2014 'dict | None' \u2014 defaults to None

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/","title":"ExtremelyFastDecisionTreeClassifier","text":"

    Extremely Fast Decision Tree classifier.

    Also referred to as Hoeffding AnyTime Tree (HATT) classifier.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • min_samples_reevaluate

      Type \u2192 int

      Default \u2192 20

      Number of instances a node should observe before reevaluating the best split.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.Agrawal(classification_function=0, seed=42)\ndataset = iter(gen.take(1000))\n\nmodel = tree.ExtremelyFastDecisionTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    nominal_attributes=['elevel', 'car', 'zipcode'],\n    min_samples_reevaluate=100\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 87.29%\n

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

Incrementally train the model.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/ExtremelyFastDecisionTreeClassifier/#notes","title":"Notes","text":"

The Extremely Fast Decision Tree (EFDT) 1 constructs a tree incrementally. The EFDT seeks to select and deploy a split as soon as it is confident the split is useful, and then revisits that decision, replacing the split if it subsequently becomes evident that a better split is available. The EFDT learns rapidly from a stationary distribution and eventually learns the asymptotic batch tree if the distribution from which the data are drawn is stationary.

    1. C. Manapragada, G. Webb, and M. Salehi. Extremely Fast Decision Tree. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD '18). ACM, New York, NY, USA, 1953-1962. DOI: https://doi.org/10.1145/3219819.3220005\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/","title":"HoeffdingAdaptiveTreeClassifier","text":"

    Hoeffding Adaptive Tree classifier.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attributes. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • bootstrap_sampling

      Type \u2192 bool

      Default \u2192 True

      If True, perform bootstrap sampling in the leaf nodes.

    • drift_window_threshold

      Type \u2192 int

      Default \u2192 300

      Minimum number of examples an alternate tree must observe before being considered as a potential replacement to the current one.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detector used to build the tree. If None then drift.ADWIN is used.

    • switch_significance

      Type \u2192 float

      Default \u2192 0.05

      The significance level to assess whether alternate subtrees are significantly better than their main subtree counterparts.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_alternate_trees

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • n_pruned_alternate_trees

    • n_switch_alternate_trees

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.ConceptDriftStream(stream=synth.SEA(seed=42, variant=0),\n                               drift_stream=synth.SEA(seed=42, variant=1),\n                               seed=1, position=500, width=50)\ndataset = iter(gen.take(1000))\n\nmodel = tree.HoeffdingAdaptiveTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    leaf_prediction='nb',\n    nb_threshold=10,\n    seed=0\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 91.49%\n

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the model on instance x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingAdaptiveTreeClassifier/#notes","title":"Notes","text":"

    The Hoeffding Adaptive Tree 1 uses a drift detector to monitor performance of branches in the tree and to replace them with new branches when their accuracy decreases.

    The bootstrap sampling strategy is an improvement over the original Hoeffding Adaptive Tree algorithm. It is enabled by default since, in general, it results in better performance.

    1. Bifet, Albert, and Ricard Gavald\u00e0. \"Adaptive learning from evolving data streams.\" In International Symposium on Intelligent Data Analysis, pp. 249-260. Springer, Berlin, Heidelberg, 2009.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/","title":"HoeffdingAdaptiveTreeRegressor","text":"

    Hoeffding Adaptive Tree regressor (HATR).

This class implements a regression version of the Hoeffding Adaptive Tree Classifier. Hence, it also uses an ADWIN concept-drift detector instance at each decision node to monitor possible changes in the data distribution. If a drift is detected in a node, an alternate tree begins to be induced in the background. When enough information is gathered, HATR replaces the node where the change was detected with its alternate tree.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

The exponential decaying factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. On the other hand, if its value approaches 0, the most recently observed errors have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attributes. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • bootstrap_sampling

      Type \u2192 bool

      Default \u2192 True

      If True, perform bootstrap sampling in the leaf nodes.

    • drift_window_threshold

      Type \u2192 int

      Default \u2192 300

      Minimum number of examples an alternate tree must observe before being considered as a potential replacement to the current one.

    • drift_detector

      Type \u2192 base.DriftDetector | None

      Default \u2192 None

      The drift detector used to build the tree. If None then drift.ADWIN is used. Only detectors that support arbitrarily valued continuous data can be used for regression.

    • switch_significance

      Type \u2192 float

      Default \u2192 0.05

      The significance level to assess whether alternate subtrees are significantly better than their main subtree counterparts.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    • seed

      Type \u2192 int | None

      Default \u2192 None

      Random seed for reproducibility.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_alternate_trees

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • n_pruned_alternate_trees

    • n_switch_alternate_trees

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    tree.HoeffdingAdaptiveTreeRegressor(\n        grace_period=50,\n        model_selector_decay=0.3,\n        seed=0\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.823026\n

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the tree model on sample x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingAdaptiveTreeRegressor/#notes","title":"Notes","text":"

    The Hoeffding Adaptive Tree 1 uses drift detectors to monitor performance of branches in the tree and to replace them with new branches when their accuracy decreases.

    The bootstrap sampling strategy is an improvement over the original Hoeffding Adaptive Tree algorithm. It is enabled by default since, in general, it results in better performance.

To cope with ADWIN's requirement of bounded input data, HATR uses a novel error normalization strategy based on the empirical rule of Gaussian distributions. We assume the deviations of the predictions from the expected values follow a normal distribution. Hence, we subject these errors to a min-max normalization assuming that most of the data lies in the \(\left[-3\sigma, 3\sigma\right]\) range. These normalized errors are passed to the ADWIN instances. This is the same strategy used by Adaptive Random Forest Regressor.
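
    A rough sketch of this normalization as described above (our illustration, not the library's exact code): the error is min-max scaled under the assumption that it lies within three standard deviations of its running mean, and the scaled value is what the ADWIN instance sees.

    def normalize_error(error: float, mean: float, std: float) -> float:
        """Min-max scale an error to [0, 1], assuming it lies in [mean - 3*std, mean + 3*std]."""
        lo, hi = mean - 3 * std, mean + 3 * std
        if hi == lo:  # degenerate case: no spread observed yet
            return 0.5
        # Clip so that rare values outside the assumed range stay bounded.
        return min(max((error - lo) / (hi - lo), 0.0), 1.0)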

    1. Bifet, Albert, and Ricard Gavald\u00e0. \"Adaptive learning from evolving data streams.\" In International Symposium on Intelligent Data Analysis, pp. 249-260. Springer, Berlin, Heidelberg, 2009.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingTreeClassifier/","title":"HoeffdingTreeClassifier","text":"

    Hoeffding Tree or Very Fast Decision Tree classifier.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • split_criterion

      Type \u2192 str

      Default \u2192 info_gain

Split criterion to use. - 'gini' - Gini - 'info_gain' - Information Gain - 'hellinger' - Hellinger Distance

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 nba

Prediction mechanism used at leaves. - 'mc' - Majority Class - 'nb' - Naive Bayes - 'nba' - Naive Bayes Adaptive

    • nb_threshold

      Type \u2192 int

      Default \u2192 0

      Number of instances a leaf should observe before allowing Naive Bayes.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.GaussianSplitter is used if splitter is None.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • min_branch_fraction

      Type \u2192 float

      Default \u2192 0.01

      The minimum percentage of observed data required for branches resulting from split candidates. To validate a split candidate, at least two resulting branches must have a percentage of samples greater than min_branch_fraction. This criterion prevents unnecessary splits when the majority of instances are concentrated in a single branch.

    • max_share_to_split

      Type \u2192 float

      Default \u2192 0.99

      Only perform a split in a leaf if the proportion of elements in the majority class is smaller than this parameter value. This parameter avoids performing splits when most of the data belongs to a single class.

    • max_size

      Type \u2192 float

      Default \u2192 100.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingTreeClassifier/#examples","title":"Examples","text":"

    from river.datasets import synth\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ngen = synth.Agrawal(classification_function=0, seed=42)\ndataset = iter(gen.take(1000))\n\nmodel = tree.HoeffdingTreeClassifier(\n    grace_period=100,\n    delta=1e-5,\n    nominal_attributes=['elevel', 'car', 'zipcode']\n)\n\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 84.58%\n

    "},{"location":"api/tree/HoeffdingTreeClassifier/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the model on instance x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x

    Returns

A dictionary that associates a probability with each label.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingTreeClassifier/#notes","title":"Notes","text":"

    A Hoeffding Tree 1 is an incremental, anytime decision tree induction algorithm that is capable of learning from massive data streams, assuming that the distribution generating examples does not change over time. Hoeffding trees exploit the fact that a small sample can often be enough to choose an optimal splitting attribute. This idea is supported mathematically by the Hoeffding bound, which quantifies the number of observations (in our case, examples) needed to estimate some statistics within a prescribed precision (in our case, the goodness of an attribute).

A theoretically appealing feature of Hoeffding Trees, not shared by other incremental decision tree learners, is that they have sound performance guarantees. Using the Hoeffding bound, one can show that their output is asymptotically nearly identical to that of a non-incremental learner using infinitely many examples. This implementation is based on MOA 2.
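
    For reference, the Hoeffding bound states that, with probability \(1 - \delta\), the empirical mean of \(n\) observations of a random variable with range \(R\) is within \(\epsilon = \sqrt{\frac{R^2 \ln(1/\delta)}{2n}}\) of the true mean. A small sketch:

    import math

    def hoeffding_bound(value_range: float, delta: float, n: int) -> float:
        """Deviation within which the empirical mean of n observations matches the
        true mean, with probability 1 - delta, for a variable with the given range."""
        return math.sqrt(value_range ** 2 * math.log(1 / delta) / (2 * n))

    # e.g. with the default delta=1e-07 and a grace_period of 200 observations:
    print(f"{hoeffding_bound(1.0, 1e-07, 200):.3f}")  # ~0.201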

    1. G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data streams. In KDD\u201901, pages 97\u2013106, San Francisco, CA, 2001. ACM Press.\u00a0\u21a9

    2. Albert Bifet, Geoff Holmes, Richard Kirkby, Bernhard Pfahringer. MOA: Massive Online Analysis; Journal of Machine Learning Research 11: 1601-1604, 2010.\u00a0\u21a9

    "},{"location":"api/tree/HoeffdingTreeRegressor/","title":"HoeffdingTreeRegressor","text":"

    Hoeffding Tree regressor.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Significance level to calculate the Hoeffding bound. The significance level is given by 1 - delta. Values closer to zero imply longer split decision delays.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | None

      Default \u2192 None

The regression model used to provide responses if leaf_prediction='model'. If not provided, an instance of linear_model.LinearRegression with the default hyperparameters is used.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

The exponential decaying factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. On the other hand, if its value approaches 0, the most recently observed errors have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, then assume that all numeric attributes should be treated as continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

      Max allowed size tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/HoeffdingTreeRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\nfrom river import preprocessing\n\ndataset = datasets.TrumpApproval()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    tree.HoeffdingTreeRegressor(\n        grace_period=100,\n        model_selector_decay=0.9\n    )\n)\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.793345\n

    "},{"location":"api/tree/HoeffdingTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

    • max_depth \u2014 'int | None' \u2014 defaults to None The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Train the tree model on sample x and corresponding target y.

    Parameters

    • x
    • y
    • sample_weight \u2014 defaults to 1.0

    Returns

    self

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    "},{"location":"api/tree/HoeffdingTreeRegressor/#notes","title":"Notes","text":"

The Hoeffding Tree Regressor (HTR) is an adaptation of the incremental tree algorithm of the same name for classification. Similarly to its classification counterpart, HTR uses the Hoeffding bound to control its split decisions. Unlike the classification algorithm, HTR relies on calculating the reduction of variance in the target space to decide among the split candidates. The smaller the variance at its leaf nodes, the more homogeneous the partitions are. At its leaf nodes, HTR fits either linear models or uses the target average as the predictor.
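
    To make the split criterion concrete, here is an illustrative computation of variance reduction (a sketch, not river's internal code): the merit of a split is the parent node's target variance minus the size-weighted variance of the resulting partitions.

    import statistics

    def variance_reduction(parent: list[float], children: list[list[float]]) -> float:
        """Drop in target variance obtained by partitioning `parent` into `children`."""
        n = len(parent)
        weighted_child_var = sum(
            len(child) / n * statistics.pvariance(child) for child in children
        )
        return statistics.pvariance(parent) - weighted_child_var

    # The split separating small from large targets removes all of the variance:
    print(variance_reduction([1.0, 1.0, 5.0, 5.0], [[1.0, 1.0], [5.0, 5.0]]))  # 4.0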

    "},{"location":"api/tree/SGTClassifier/","title":"SGTClassifier","text":"

Stochastic Gradient Tree 1 for binary classification.

    Binary decision tree classifier that minimizes the binary cross-entropy to guide its growth.

Stochastic Gradient Trees (SGT) directly minimize a loss function to guide tree growth and update their predictions. Thus, they differ from other incremental tree learners, which do not directly optimize the loss but rather data impurity-related heuristics.

    "},{"location":"api/tree/SGTClassifier/#parameters","title":"Parameters","text":"
    • delta

      Type \u2192 float

      Default \u2192 1e-07

Defines the significance level of the F-tests performed to decide upon creating splits or updating predictions.

    • grace_period

      Type \u2192 int

      Default \u2192 200

      Interval between split attempts or prediction updates.

    • init_pred

      Type \u2192 float

      Default \u2192 0.0

      Initial value predicted by the tree.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

The maximum depth the tree might reach. If set to None, the tree will grow indefinitely.

    • lambda_value

      Type \u2192 float

      Default \u2192 0.1

      Positive float value used to impose a penalty over the tree's predictions and force them to become smaller. The greater the lambda value, the more constrained are the predictions.

    • gamma

      Type \u2192 float

      Default \u2192 1.0

      Positive float value used to impose a penalty over the tree's splits and force them to be avoided when possible. The greater the gamma value, the smaller the chance of a split occurring.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      List with identifiers of the nominal attributes. If None, all features containing numbers are assumed to be numeric.

    • feature_quantizer

      Type \u2192 tree.splitter.Quantizer | None

      Default \u2192 None

      The algorithm used to quantize numeric features. Either a static quantizer (as in the original implementation) or a dynamic quantizer can be used. The correct choice and setup of the feature quantizer is a crucial step to determine the performance of SGTs. Feature quantizers are akin to the attribute observers used in Hoeffding Trees. By default, an instance of tree.splitter.StaticQuantizer (with default parameters) is used if this parameter is not set.

    "},{"location":"api/tree/SGTClassifier/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_node_updates

    • n_nodes

    • n_observations

    • n_splits

    "},{"location":"api/tree/SGTClassifier/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.Phishing()\nmodel = tree.SGTClassifier(\n    feature_quantizer=tree.splitter.StaticQuantizer(\n        n_bins=32, warm_start=10\n    )\n)\nmetric = metrics.Accuracy()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    Accuracy: 82.24%\n

    "},{"location":"api/tree/SGTClassifier/#methods","title":"Methods","text":"learn_one

    Update the model with a set of features x and a label y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.ClfTarget'
    • w \u2014 defaults to 1.0

    Returns

    Classifier: self

    predict_one

    Predict the label of a set of features x.

    Parameters

    • x \u2014 'dict'
    • kwargs

    Returns

    base.typing.ClfTarget | None: The predicted label.

    predict_proba_one

    Predict the probability of each label for a dictionary of features x.

    Parameters

    • x \u2014 'dict'

    Returns

dict[base.typing.ClfTarget, float]: A dictionary that associates a probability with each label.

    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/SGTRegressor/","title":"SGTRegressor","text":"

    Stochastic Gradient Tree for regression.

    Incremental decision tree regressor that minimizes the mean square error to guide its growth.

Stochastic Gradient Trees (SGT) directly minimize a loss function to guide tree growth and update their predictions. Thus, they differ from other incremental tree learners, which do not directly optimize the loss but rather a data impurity-related heuristic.

    "},{"location":"api/tree/SGTRegressor/#parameters","title":"Parameters","text":"
    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Define the significance level of the F-tests performed to decide upon creating splits or updating predictions.

    • grace_period

      Type \u2192 int

      Default \u2192 200

      Interval between split attempts or prediction updates.

    • init_pred

      Type \u2192 float

      Default \u2192 0.0

      Initial value predicted by the tree.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

The maximum depth the tree might reach. If set to None, the tree will grow indefinitely.

    • lambda_value

      Type \u2192 float

      Default \u2192 0.1

      Positive float value used to impose a penalty over the tree's predictions and force them to become smaller. The greater the lambda value, the more constrained are the predictions.

    • gamma

      Type \u2192 float

      Default \u2192 1.0

      Positive float value used to impose a penalty over the tree's splits and force them to be avoided when possible. The greater the gamma value, the smaller the chance of a split occurring.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

      List with identifiers of the nominal attributes. If None, all features containing numbers are assumed to be numeric.

    • feature_quantizer

      Type \u2192 tree.splitter.Quantizer | None

      Default \u2192 None

      The algorithm used to quantize numeric features. Either a static quantizer (as in the original implementation) or a dynamic quantizer can be used. The correct choice and setup of the feature quantizer is a crucial step to determine the performance of SGTs. Feature quantizers are akin to the attribute observers used in Hoeffding Trees. By default, an instance of tree.splitter.StaticQuantizer (with default parameters) is used if this parameter is not set.

    "},{"location":"api/tree/SGTRegressor/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_node_updates

    • n_nodes

    • n_observations

    • n_splits

    "},{"location":"api/tree/SGTRegressor/#examples","title":"Examples","text":"

    from river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import tree\n\ndataset = datasets.TrumpApproval()\nmodel = tree.SGTRegressor(\n    delta=0.01,\n    lambda_value=0.01,\n    grace_period=20,\n    feature_quantizer=tree.splitter.DynamicQuantizer(std_prop=0.1)\n)\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 1.721818\n

    "},{"location":"api/tree/SGTRegressor/#methods","title":"Methods","text":"learn_one

    Fits to a set of features x and a real-valued target y.

    Parameters

    • x \u2014 'dict'
    • y \u2014 'base.typing.RegTarget'
    • w \u2014 defaults to 1.0

    Returns

    Regressor: self

    predict_one

    Predict the output of features x.

    Parameters

    • x \u2014 'dict'

    Returns

    base.typing.RegTarget: The prediction.

    "},{"location":"api/tree/SGTRegressor/#notes","title":"Notes","text":"

This implementation enhances the original proposal 1 by using an incremental strategy to discretize numerical features dynamically, rather than relying on a calibration set and a parameterized number of bins. The strategy used is an adaptation of the Quantization Observer (QO) 2. Different bin size setting policies are available for selection. They are directly related to the number of split candidates the tree is going to explore, and thus to how accurate its split decisions are going to be. Besides, the number of stored bins per feature is directly related to the tree's memory usage and runtime.

    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    2. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9
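To make the bin-size trade-off concrete, here is a hedged sketch of two quantizer setups (illustrative only; the constructor arguments mirror the DynamicQuantizer parameters documented in the tree.splitter pages):

from river import tree

# A larger std_prop yields a larger radius, hence fewer bins per feature:
# faster and lighter, but with coarser split candidates.
coarse = tree.SGTRegressor(
    feature_quantizer=tree.splitter.DynamicQuantizer(std_prop=0.5)
)

# A smaller std_prop yields more bins: finer split candidates, at the cost
# of memory and runtime.
fine = tree.SGTRegressor(
    feature_quantizer=tree.splitter.DynamicQuantizer(std_prop=0.1)
)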

    "},{"location":"api/tree/iSOUPTreeRegressor/","title":"iSOUPTreeRegressor","text":"

    Incremental Structured Output Prediction Tree (iSOUP-Tree) for multi-target regression.

    This is an implementation of the iSOUP-Tree proposed by A. Osojnik, P. Panov, and S. D\u017eeroski 1.

    "},{"location":"api/tree/iSOUPTreeRegressor/#parameters","title":"Parameters","text":"
    • grace_period

      Type \u2192 int

      Default \u2192 200

      Number of instances a leaf should observe between split attempts.

    • max_depth

      Type \u2192 int | None

      Default \u2192 None

      The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    • delta

      Type \u2192 float

      Default \u2192 1e-07

      Allowed error in split decision, a value closer to 0 takes longer to decide.

    • tau

      Type \u2192 float

      Default \u2192 0.05

      Threshold below which a split will be forced to break ties.

    • leaf_prediction

      Type \u2192 str

      Default \u2192 adaptive

Prediction mechanism used at leaves. - 'mean' - Target mean - 'model' - Uses the model defined in leaf_model - 'adaptive' - Chooses between 'mean' and 'model' dynamically

    • leaf_model

      Type \u2192 base.Regressor | dict | None

      Default \u2192 None

The regression model(s) used to provide responses if leaf_prediction='model'. It can be either a regressor (in which case it is going to be replicated to all the targets) or a dictionary whose keys are target identifiers, and the values are instances of base.Regressor. If not provided, instances of linear_model.LinearRegression with the default hyperparameters are used for all the targets. If a dictionary is passed and not all target models are specified, copies of the first model in the dictionary will be used for the remaining targets.

    • model_selector_decay

      Type \u2192 float

      Default \u2192 0.95

The exponential decaying factor applied to the learning models' squared errors, which are monitored if leaf_prediction='adaptive'. Must be between 0 and 1. The closer to 1, the more importance is given to past observations. On the other hand, if its value approaches 0, recently observed errors have more influence on the final decision.

    • nominal_attributes

      Type \u2192 list | None

      Default \u2192 None

List of nominal attribute identifiers. If empty, all numeric attributes are assumed to be continuous.

    • splitter

      Type \u2192 Splitter | None

      Default \u2192 None

      The Splitter or Attribute Observer (AO) used to monitor the class statistics of numeric features and perform splits. Splitters are available in the tree.splitter module. Different splitters are available for classification and regression tasks. Classification and regression splitters can be distinguished by their property is_target_class. This is an advanced option. Special care must be taken when choosing different splitters. By default, tree.splitter.TEBSTSplitter is used if splitter is None.

    • min_samples_split

      Type \u2192 int

      Default \u2192 5

      The minimum number of samples every branch resulting from a split candidate must have to be considered valid.

    • binary_split

      Type \u2192 bool

      Default \u2192 False

      If True, only allow binary splits.

    • max_size

      Type \u2192 float

      Default \u2192 500.0

      The max size of the tree, in Megabytes (MB).

    • memory_estimate_period

      Type \u2192 int

      Default \u2192 1000000

      Interval (number of processed instances) between memory consumption checks.

    • stop_mem_management

      Type \u2192 bool

      Default \u2192 False

      If True, stop growing as soon as memory limit is hit.

    • remove_poor_attrs

      Type \u2192 bool

      Default \u2192 False

      If True, disable poor attributes to reduce memory usage.

    • merit_preprune

      Type \u2192 bool

      Default \u2192 True

      If True, enable merit-based tree pre-pruning.

    "},{"location":"api/tree/iSOUPTreeRegressor/#attributes","title":"Attributes","text":"
    • height

    • leaf_prediction

      Return the prediction strategy used by the tree at its leaves.

    • max_size

Max allowed size the tree can reach (in MB).

    • n_active_leaves

    • n_branches

    • n_inactive_leaves

    • n_leaves

    • n_nodes

    • split_criterion

      Return a string with the name of the split criterion being used by the tree.

    • summary

      Collect metrics corresponding to the current status of the tree in a string buffer.

    "},{"location":"api/tree/iSOUPTreeRegressor/#examples","title":"Examples","text":"

    import numbers\nfrom river import compose\nfrom river import datasets\nfrom river import evaluate\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import tree\n\ndataset = datasets.SolarFlare()\n\nnum = compose.SelectType(numbers.Number) | preprocessing.MinMaxScaler()\ncat = compose.SelectType(str) | preprocessing.OneHotEncoder()\n\nmodel = tree.iSOUPTreeRegressor(\n    grace_period=100,\n    leaf_prediction='model',\n    leaf_model={\n        'c-class-flares': linear_model.LinearRegression(l2=0.02),\n        'm-class-flares': linear_model.PARegressor(),\n        'x-class-flares': linear_model.LinearRegression(l2=0.1)\n    }\n)\n\npipeline = (num + cat) | model\nmetric = metrics.multioutput.MicroAverage(metrics.MAE())\n\nevaluate.progressive_val_score(dataset, pipeline, metric)\n
    MicroAverage(MAE): 0.426177\n

    "},{"location":"api/tree/iSOUPTreeRegressor/#methods","title":"Methods","text":"debug_one

    Print an explanation of how x is predicted.

    Parameters

    • x \u2014 'dict'

    Returns

str | None: A representation of the path followed by the tree to predict x; None if the tree is empty.

    draw

    Draw the tree using the graphviz library.

    Since the tree is drawn without passing incoming samples, classification trees will show the majority class in their leaves, whereas regression trees will use the target mean.

    Parameters

• max_depth \u2014 'int | None' \u2014 defaults to None \u2014 The maximum depth a tree can reach. If None, the tree will grow indefinitely.

    learn_one

    Incrementally train the model with one sample.

Training tasks:

• If the tree is empty, create a leaf node as the root.
• If the tree is already initialized, find the leaf corresponding to the instance and update the leaf node statistics.
• If growth is allowed and the number of instances that the leaf has observed between split attempts exceeds the grace period, then attempt to split.

    Parameters

    • x
    • y
    • sample_weight \u2014 'float' \u2014 defaults to 1.0

    predict_one

    Predict the target value using one of the leaf prediction strategies.

    Parameters

    • x

    Returns

    Predicted target value.

    to_dataframe

    Return a representation of the current tree structure organized in a pandas.DataFrame object.

    In case the tree is empty or it only contains a single node (a leaf), None is returned.

    Returns

    df

    1. Alja\u017e Osojnik, Pan\u010de Panov, and Sa\u0161o D\u017eeroski. \"Tree-based methods for online multi-target regression.\" Journal of Intelligent Information Systems 50.2 (2018): 315-339.\u00a0\u21a9

    "},{"location":"api/tree/base/Branch/","title":"Branch","text":"

    A generic tree branch.

    "},{"location":"api/tree/base/Branch/#parameters","title":"Parameters","text":"
    • children

      Child branches and/or leaves.

    "},{"location":"api/tree/base/Branch/#attributes","title":"Attributes","text":"
    • height

      Distance to the deepest descendant.

    • n_branches

      Number of branches, including thyself.

    • n_leaves

      Number of leaves.

    • n_nodes

      Number of descendants, including thyself.

    • repr_split

      String representation of the split.

    "},{"location":"api/tree/base/Branch/#methods","title":"Methods","text":"iter_bfs

    Iterate over nodes in breadth-first order.

    iter_branches

    Iterate over branches in depth-first order.

    iter_dfs

    Iterate over nodes in depth-first order.

    iter_edges

    Iterate over edges in depth-first order.

    iter_leaves

    Iterate over leaves from the left-most one to the right-most one.

    most_common_path

    Return a tuple with the branch index and the child node related to the most traversed path.

    Used in case the split feature is missing from an instance.

    next

    Move to the next node down the tree.

    Parameters

    • x

    to_dataframe

    Build a DataFrame containing one record for each node.

    traverse

    Return the leaf corresponding to the given input.

    Parameters

    • x
    • until_leaf \u2014 defaults to True

    walk

    Iterate over the nodes of the path induced by x.

    Parameters

    • x
    • until_leaf \u2014 defaults to True

    "},{"location":"api/tree/base/Leaf/","title":"Leaf","text":"

    A generic tree node.

    "},{"location":"api/tree/base/Leaf/#parameters","title":"Parameters","text":"
    • kwargs

      Each provided keyword argument is stored in the leaf as an attribute.

    "},{"location":"api/tree/base/Leaf/#attributes","title":"Attributes","text":"
    • height

    • n_branches

    • n_leaves

    • n_nodes

    "},{"location":"api/tree/base/Leaf/#methods","title":"Methods","text":"iter_branches iter_dfs iter_edges iter_leaves walk"},{"location":"api/tree/splitter/DynamicQuantizer/","title":"DynamicQuantizer","text":"

    Adapted version of the Quantizer Observer (QO)1 that is applied to Stochastic Gradient Trees (SGT).

    This feature quantizer starts by partitioning the inputs using the passed radius value. As more splits are created in the SGTs, new feature quantizers will use std * std_prop as the quantization radius. In the expression, std represents the standard deviation of the input data, which is calculated incrementally.
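A hedged sketch of how the radius for a new quantizer instance could be derived (illustrative arithmetic only, using the incremental variance statistic from river.stats; this is not the class's internal code):

from river import stats

# Track the input feature's variance incrementally.
var = stats.Var()
for x in [1.2, 0.7, 3.4, 2.1]:
    var.update(x)

std_prop = 0.25  # the default documented below
new_radius = (var.get() ** 0.5) * std_prop  # std * std_prop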

    "},{"location":"api/tree/splitter/DynamicQuantizer/#parameters","title":"Parameters","text":"
    • radius

      Type \u2192 float

      Default \u2192 0.5

      The initial quantization radius.

    • std_prop

      Type \u2192 float

      Default \u2192 0.25

      The proportion of the standard deviation that is going to be used to define the radius value for new quantizer instances following the initial one.

    "},{"location":"api/tree/splitter/DynamicQuantizer/#methods","title":"Methods","text":"update
    1. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9

    "},{"location":"api/tree/splitter/EBSTSplitter/","title":"EBSTSplitter","text":"

    iSOUP-Tree's Extended Binary Search Tree (E-BST).

    This class implements the Extended Binary Search Tree1 (E-BST) structure, using the variant employed by Osojnik et al.2 in the iSOUP-Tree algorithm. This structure is employed to observe the target space distribution.

    Proposed along with Fast Incremental Model Tree with Drift Detection1 (FIMT-DD), E-BST was the first attribute observer (AO) proposed for incremental Hoeffding Tree regressors. This AO works by storing all observations between splits in an extended binary search tree structure. E-BST stores the input feature realizations and statistics of the target(s) that enable calculating the split heuristic at any time. To alleviate time and memory costs, E-BST implements a memory management routine, where the worst split candidates are pruned from the binary tree.

    In this variant, only the left branch statistics are stored and the complete split-enabling statistics are calculated with an in-order traversal of the binary search tree.
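To make the storage scheme concrete, here is a toy sketch of the idea (regression case, tracking only counts and target sums; this is illustrative and much simpler than River's actual implementation):

class EBSTNode:
    """Stores the 'left branch' statistics: the count and target sum of
    every observation whose feature value is <= this node's value."""

    def __init__(self, x, y):
        self.x = x
        self.n = 1
        self.sum_y = y
        self.left = None
        self.right = None

    def insert(self, x, y):
        if x <= self.x:
            # The observation contributes to this node's left-branch statistics.
            self.n += 1
            self.sum_y += y
            if x < self.x:
                if self.left is None:
                    self.left = EBSTNode(x, y)
                else:
                    self.left.insert(x, y)
        elif self.right is None:
            self.right = EBSTNode(x, y)
        else:
            self.right.insert(x, y)


def split_candidates(node, carry=(0, 0.0)):
    """In-order traversal yielding (threshold, n_left, sum_y_left), i.e. the
    complete split-enabling statistics for each candidate threshold."""
    if node is None:
        return
    # Candidates in the left subtree share the statistics carried so far.
    yield from split_candidates(node.left, carry)
    n_left = carry[0] + node.n
    sum_left = carry[1] + node.sum_y
    yield node.x, n_left, sum_left
    # Everything at or below this node lies to the left of the right subtree.
    yield from split_candidates(node.right, (n_left, sum_left))

For instance, inserting (5, 50), (3, 30) and (7, 70) and then traversing yields (3, 1, 30.0), (5, 2, 80.0) and (7, 3, 150.0): each candidate threshold comes with the statistics of everything at or below it.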

    "},{"location":"api/tree/splitter/EBSTSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/EBSTSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Not implemented in regression splitters.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    remove_bad_splits

    Remove bad splits.

Based on FIMT-DD's 1 procedure to remove bad split candidates from the E-BST. This mechanism is triggered every time a split attempt fails. The rationale is to remove points whose split merit is much worse than the best candidate overall (for which the growth decision already failed). Let \(m_1\) be the merit of the best split point and \(m_2\) be the merit of the second best split candidate. The ratio \(r = m_2/m_1\) along with the Hoeffding bound (\(\epsilon\)) are used to decide upon creating a split. A split occurs when \(r < 1 - \epsilon\). A split candidate, with merit \(m_i\), is considered bad if \(m_i / m_1 < r - 2\epsilon\). The rationale is the following: if the merit ratio for this point is smaller than the lower bound of \(r\), then the true merit of that split relative to the best one is small. Hence, this candidate can be safely removed. To avoid excessive and costly manipulations of the E-BST to update the stored statistics, only the nodes whose children are all bad split points are pruned, as defined in 1.
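Written out as code, the pruning test above amounts to a one-liner (a direct transcription of the inequality, not River's internal code):

def is_bad_split(m_i: float, m_1: float, r: float, eps: float) -> bool:
    # m_1: merit of the best split, r = m_2 / m_1, eps: Hoeffding bound.
    # A candidate is bad when its merit ratio falls below the lower bound of r.
    return m_i / m_1 < r - 2 * eps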

    Parameters

    • criterion
    • last_check_ratio \u2014 'float'
    • last_check_vr \u2014 'float'
    • last_check_e \u2014 'float'
    • pre_split_dist \u2014 'list | dict'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Ikonomovska, E., Gama, J., & D\u017eeroski, S. (2011). Learning model trees from evolving data streams. Data mining and knowledge discovery, 23(1), 128-168.\u00a0\u21a9\u21a9\u21a9\u21a9

    2. Osojnik, Alja\u017e. 2017. Structured output prediction on Data Streams (Doctoral Dissertation) \u21a9

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/","title":"ExhaustiveSplitter","text":"

    Numeric attribute observer for classification tasks that is based on a Binary Search Tree.

    This algorithm1 is also referred to as exhaustive attribute observer, since it ends up storing all the observations between split attempts2.

This splitter cannot perform probability density estimations, so it does not work well when coupled with tree leaves that use naive Bayes models.

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/ExhaustiveSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    The underlying data structure used to monitor the input does not allow probability density estimations. Hence, it always returns zero for any given input.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Domingos, P. and Hulten, G., 2000, August. Mining high-speed data streams. In Proceedings of the sixth ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 71-80).\u00a0\u21a9

    2. Pfahringer, B., Holmes, G. and Kirkby, R., 2008, May. Handling numeric attributes in hoeffding trees. In Pacific-Asia Conference on Knowledge Discovery and Data Mining (pp. 296-307). Springer, Berlin, Heidelberg.\u00a0\u21a9

    "},{"location":"api/tree/splitter/GaussianSplitter/","title":"GaussianSplitter","text":"

    Numeric attribute observer for classification tasks that is based on Gaussian estimators.

    The distribution of each class is approximated using a Gaussian distribution. Hence, the probability density function can be easily calculated.

    "},{"location":"api/tree/splitter/GaussianSplitter/#parameters","title":"Parameters","text":"
    • n_splits

      Type \u2192 int

      Default \u2192 10

      The number of partitions to consider when querying for split candidates.

    "},{"location":"api/tree/splitter/GaussianSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/GaussianSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/HistogramSplitter/","title":"HistogramSplitter","text":"

    Numeric attribute observer for classification tasks that discretizes features using histograms.

    "},{"location":"api/tree/splitter/HistogramSplitter/#parameters","title":"Parameters","text":"
    • n_bins

      Type \u2192 int

      Default \u2192 256

      The maximum number of bins in the histogram.

    • n_splits

      Type \u2192 int

      Default \u2192 32

      The number of split points to evaluate when querying for the best split candidate.

    "},{"location":"api/tree/splitter/HistogramSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/HistogramSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/QOSplitter/","title":"QOSplitter","text":"

    Quantization observer (QO).

This splitter utilizes a hash-based quantization algorithm to keep track of the target statistics and evaluate split candidates. QO relies on the radius parameter to define discretization intervals for each incoming feature. Split candidates are defined as the midpoints between two consecutive hash slots. Both binary splits and multi-way splits can be created by this attribute observer. This class implements the algorithm described in 1.

    The smaller the quantization radius, the more hash slots will be created to accommodate the discretized data. Hence, both the running time and memory consumption increase, but the resulting splits ought to be closer to the ones obtained by a batch exhaustive approach. On the other hand, if the radius is too large, fewer slots will be created, less memory and running time will be required, but at the cost of coarse split suggestions.

QO assumes that all features have the same range. It is always advisable to scale the features before applying this splitter. That can be done using the preprocessing module. A good "rule of thumb" is to scale data using preprocessing.StandardScaler and define the radius as a proportion of the features' standard deviation. For instance, the default radius value would correspond to one quarter of the normalized features' standard deviation (since the scaled data has zero mean and unit variance). If the features come from normal distributions, by following the empirical rule, roughly 32 hash slots will be created.
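Following that rule of thumb, a setup could look like the sketch below (hedged: the wiring is illustrative, but it only combines components documented elsewhere on these pages):

from river import preprocessing, tree

# Standardize first, so every feature has unit standard deviation; the
# radius then reads as "a quarter of a standard deviation".
model = (
    preprocessing.StandardScaler()
    | tree.HoeffdingTreeRegressor(
        splitter=tree.splitter.QOSplitter(radius=0.25)
    )
)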

    "},{"location":"api/tree/splitter/QOSplitter/#parameters","title":"Parameters","text":"
    • radius

      Type \u2192 float

      Default \u2192 0.25

      The quantization radius. QO discretizes the incoming feature in intervals of equal length that are defined by this parameter.

    • allow_multiway_splits

      Default \u2192 False

Whether or not to allow multi-way splits to be evaluated. Numeric multi-way splits use the same quantization strategy of QO to create multiple tree branches. The same quantization radius is used, and each stored slot represents the split-enabling statistics of one branch.

    "},{"location":"api/tree/splitter/QOSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/QOSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    1. Mastelini, S.M. and de Leon Ferreira, A.C.P., 2021. Using dynamical quantization to perform split attempts in online tree regressors. Pattern Recognition Letters.\u00a0\u21a9

    "},{"location":"api/tree/splitter/Quantizer/","title":"Quantizer","text":"

    Base class for the feature quantizers used in Stochastic Gradient Trees1.

    "},{"location":"api/tree/splitter/Quantizer/#methods","title":"Methods","text":"update
    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/splitter/Splitter/","title":"Splitter","text":"

    Base class for the tree splitters.

    Each Attribute Observer (AO) or Splitter monitors one input feature and finds the best split point for this attribute. AOs can also perform other tasks related to the monitored feature, such as estimating its probability density function (classification case).

    This class should not be instantiated, as none of its methods are implemented.

    "},{"location":"api/tree/splitter/Splitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/Splitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool'

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Get the probability for an attribute value given a class.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    Returns

    float: Probability for an attribute value given a class.

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/tree/splitter/StaticQuantizer/","title":"StaticQuantizer","text":"

    Quantization strategy originally used in Stochastic Gradient Trees (SGT)1.

    Firstly, a buffer of size warm_start is stored. The data stored in the buffer is then used to quantize the input feature into n_bins intervals. These intervals will be replicated to every new quantizer. Feature values lying outside of the limits defined by the initial buffer will be mapped to the head or tail of the list of intervals.
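The mapping can be illustrated with a toy sketch (equal-width bins derived from a warm-start buffer; a simplification of the behavior described above, not the actual class):

def make_bucketer(buffer, n_bins):
    # Derive n_bins equal-width intervals from the warm-start buffer.
    lo, hi = min(buffer), max(buffer)
    width = (hi - lo) / n_bins

    def bucket(x):
        idx = int((x - lo) / width)
        # Out-of-range values are clamped to the head or tail interval.
        return max(0, min(n_bins - 1, idx))

    return bucket

bucket = make_bucketer(buffer=[0.1, 0.4, 0.7, 0.9], n_bins=4)
bucket(0.5), bucket(-3.0), bucket(42.0)  # (2, 0, 3)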

    "},{"location":"api/tree/splitter/StaticQuantizer/#parameters","title":"Parameters","text":"
    • n_bins

      Type \u2192 int

      Default \u2192 64

The number of bins (intervals) into which the input feature is divided.

    • warm_start

      Type \u2192 int

      Default \u2192 100

      The number of observations used to initialize the quantization intervals.

    • buckets

      Type \u2192 list | None

      Default \u2192 None

      This parameter is only used internally by the quantizer, so it must not be set. Once the intervals are defined, new instances of this quantizer will receive the quantization information via this parameter.

    "},{"location":"api/tree/splitter/StaticQuantizer/#methods","title":"Methods","text":"update
    1. Gouk, H., Pfahringer, B., & Frank, E. (2019, October). Stochastic Gradient Trees. In Asian Conference on Machine Learning (pp. 1094-1109).\u00a0\u21a9

    "},{"location":"api/tree/splitter/TEBSTSplitter/","title":"TEBSTSplitter","text":"

    Truncated E-BST.

    Variation of E-BST that rounds the incoming feature values before passing them to the binary search tree (BST). By doing so, the attribute observer might reduce its processing time and memory usage since small variations in the input values will end up being mapped to the same BST node.
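The effect of the rounding is easy to see in isolation (an illustrative one-liner; digits is the parameter documented below):

# With digits=1, these three nearby readings collapse onto a single BST node.
values = [0.114, 0.108, 0.112]
{round(v, 1) for v in values}  # {0.1}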

    "},{"location":"api/tree/splitter/TEBSTSplitter/#parameters","title":"Parameters","text":"
    • digits

      Type \u2192 int

      Default \u2192 1

      The number of decimal places used to round the input feature values.

    "},{"location":"api/tree/splitter/TEBSTSplitter/#attributes","title":"Attributes","text":"
    • is_numeric

      Determine whether or not the splitter works with numerical features.

    • is_target_class

      Check on which kind of learning task the splitter is designed to work. If True, the splitter works with classification trees, otherwise it is designed for regression trees.

    "},{"location":"api/tree/splitter/TEBSTSplitter/#methods","title":"Methods","text":"best_evaluated_split_suggestion

    Get the best split suggestion given a criterion and the target's statistics.

    Parameters

    • criterion \u2014 'SplitCriterion'
    • pre_split_dist \u2014 'list | dict'
    • att_idx \u2014 'base.typing.FeatureName'
    • binary_only \u2014 'bool' \u2014 defaults to True

    Returns

    BranchFactory: Suggestion of the best attribute split.

    cond_proba

    Not implemented in regression splitters.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.ClfTarget'

    remove_bad_splits

    Remove bad splits.

Based on FIMT-DD's procedure to remove bad split candidates from the E-BST (see EBSTSplitter). This mechanism is triggered every time a split attempt fails. The rationale is to remove points whose split merit is much worse than the best candidate overall (for which the growth decision already failed). Let \(m_1\) be the merit of the best split point and \(m_2\) be the merit of the second best split candidate. The ratio \(r = m_2/m_1\) along with the Hoeffding bound (\(\epsilon\)) are used to decide upon creating a split. A split occurs when \(r < 1 - \epsilon\). A split candidate, with merit \(m_i\), is considered bad if \(m_i / m_1 < r - 2\epsilon\). The rationale is the following: if the merit ratio for this point is smaller than the lower bound of \(r\), then the true merit of that split relative to the best one is small. Hence, this candidate can be safely removed. To avoid excessive and costly manipulations of the E-BST to update the stored statistics, only the nodes whose children are all bad split points are pruned, as defined in the FIMT-DD paper.

    Parameters

    • criterion
    • last_check_ratio \u2014 'float'
    • last_check_vr \u2014 'float'
    • last_check_e \u2014 'float'
    • pre_split_dist \u2014 'list | dict'

    update

    Update statistics of this observer given an attribute value, its target value and the weight of the instance observed.

    Parameters

    • att_val
    • target_val \u2014 'base.typing.Target'
    • sample_weight \u2014 'float'

    "},{"location":"api/utils/Rolling/","title":"Rolling","text":"

    A generic wrapper for performing rolling computations.

    This can be wrapped around any object which implements both an update and a revert method. Inputs to update are stored in a queue. Elements of the queue are popped when the window is full.

    "},{"location":"api/utils/Rolling/#parameters","title":"Parameters","text":"
    • obj

      Type \u2192 Rollable

An object that implements both an update method and a revert method.

    • window_size

      Type \u2192 int

      Size of the window.

    "},{"location":"api/utils/Rolling/#attributes","title":"Attributes","text":"
    • window_size
    "},{"location":"api/utils/Rolling/#examples","title":"Examples","text":"

    For instance, here is how you can compute a rolling average over a window of size 3:

    from river import stats, utils\n\nX = [1, 3, 5, 7]\nrmean = utils.Rolling(stats.Mean(), window_size=3)\n\nfor x in X:\n    print(rmean.update(x).get())\n
    1.0\n2.0\n3.0\n5.0\n

    "},{"location":"api/utils/Rolling/#methods","title":"Methods","text":"update"},{"location":"api/utils/SortedWindow/","title":"SortedWindow","text":"

    Sorted running window data structure.

    "},{"location":"api/utils/SortedWindow/#parameters","title":"Parameters","text":"
    • size

      Type \u2192 int

      Size of the window to compute the rolling quantile.

    "},{"location":"api/utils/SortedWindow/#attributes","title":"Attributes","text":"
    • size
    "},{"location":"api/utils/SortedWindow/#examples","title":"Examples","text":"

    from river import utils\n\nwindow = utils.SortedWindow(size=3)\n\nfor i in reversed(range(9)):\n    print(window.append(i))\n
    [8]\n[7, 8]\n[6, 7, 8]\n[5, 6, 7]\n[4, 5, 6]\n[3, 4, 5]\n[2, 3, 4]\n[1, 2, 3]\n[0, 1, 2]\n

    "},{"location":"api/utils/SortedWindow/#methods","title":"Methods","text":"
    1. Left sorted inserts in Python \u21a9

    "},{"location":"api/utils/TimeRolling/","title":"TimeRolling","text":"

    A generic wrapper for performing time rolling computations.

    This can be wrapped around any object which implements both an update and a revert method. Inputs to update are stored in a queue. Elements of the queue are popped when they are too old.

    "},{"location":"api/utils/TimeRolling/#parameters","title":"Parameters","text":"
    • obj

      Type \u2192 Rollable

An object that implements both an update method and a revert method.

    • period

      Type \u2192 dt.timedelta

      A duration of time, expressed as a datetime.timedelta.

    "},{"location":"api/utils/TimeRolling/#examples","title":"Examples","text":"

    For instance, here is how you can compute a rolling average over a period of 3 days:

import datetime as dt\nfrom river import stats, utils\n\nX = {\n    dt.datetime(2019, 1, 1): 1,\n    dt.datetime(2019, 1, 2): 5,\n    dt.datetime(2019, 1, 3): 9,\n    dt.datetime(2019, 1, 4): 13\n}\n\nrmean = utils.TimeRolling(stats.Mean(), period=dt.timedelta(days=3))\nfor t, x in X.items():\n    print(rmean.update(x, t=t).get())\n
    1.0\n3.0\n5.0\n9.0\n

    "},{"location":"api/utils/TimeRolling/#methods","title":"Methods","text":"update"},{"location":"api/utils/VectorDict/","title":"VectorDict","text":""},{"location":"api/utils/VectorDict/#methods","title":"Methods","text":"abs clear get

    Parameters

    • key
    • args
    • kwargs

    items

    keys

    max

    maximum

    Parameters

    • other

    min

    minimum

    Parameters

    • other

    pop

    Parameters

    • args
    • kwargs

    popitem

    setdefault

    Parameters

    • key
    • args
    • kwargs

    to_dict

    to_numpy

    Parameters

    • fields

    update

    Parameters

    • args
    • kwargs

    values

    with_mask

    Parameters

    • mask
    • copy \u2014 defaults to False

    "},{"location":"api/utils/dict2numpy/","title":"dict2numpy","text":"

    Convert a dictionary containing data to a numpy array.

There is no restriction on the type of keys in data, but the values must be strictly numeric. To make sure random permutations of the features do not impact the learning algorithms, the keys are first converted to strings and then sorted prior to the conversion.

    "},{"location":"api/utils/dict2numpy/#parameters","title":"Parameters","text":"
    • data

      A dictionary whose keys represent input attributes and the values represent their observed contents.

    "},{"location":"api/utils/dict2numpy/#examples","title":"Examples","text":"

    from river.utils import dict2numpy\ndict2numpy({'a': 1, 'b': 2, 3: 3})\n
    array([3, 1, 2])\n

    "},{"location":"api/utils/expand-param-grid/","title":"expand_param_grid","text":"

    Expands a grid of parameters.

This method can be used to generate a list of model parametrizations from a dictionary where each parameter is associated with a list of possible values. In other words, it expands a grid of parameters.

    Typically, this method can be used to create copies of a given model with different parameter choices. The models can then be used as part of a model selection process, such as a selection.SuccessiveHalvingClassifier or a selection.EWARegressor.

    The syntax for the parameter grid is quite flexible. It allows nesting parameters and can therefore be used to generate parameters for a pipeline.

    "},{"location":"api/utils/expand-param-grid/#parameters","title":"Parameters","text":"
    • model

      Type \u2192 base.Estimator

    • grid

      Type \u2192 dict

      The grid of parameters to expand. The provided dictionary can be nested. The only requirement is that the values at the leaves need to be lists.

    "},{"location":"api/utils/expand-param-grid/#examples","title":"Examples","text":"

    As an initial example, we can expand a grid of parameters for a single model.

    from river import linear_model\nfrom river import optim\nfrom river import utils\n\nmodel = linear_model.LinearRegression()\n\ngrid = {'optimizer': [optim.SGD(.1), optim.SGD(.01), optim.SGD(.001)]}\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    3\n

    models[0]\n
    LinearRegression (\n  optimizer=SGD (\n    lr=Constant (\n      learning_rate=0.1\n    )\n  )\n  loss=Squared ()\n  l2=0.\n  l1=0.\n  intercept_init=0.\n  intercept_lr=Constant (\n    learning_rate=0.01\n  )\n  clip_gradient=1e+12\n  initializer=Zeros ()\n)\n

    You can expand parameters for multiple choices like so:

    grid = {\n    'optimizer': [\n        (optim.SGD, {'lr': [.1, .01, .001]}),\n        (optim.Adam, {'lr': [.1, .01, .01]})\n    ]\n}\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    6\n

    You may specify a grid of parameters for a pipeline via nesting:

    from river import feature_extraction\n\nmodel = (\n    feature_extraction.BagOfWords() |\n    linear_model.LinearRegression()\n)\n\ngrid = {\n    'BagOfWords': {\n        'strip_accents': [False, True]\n    },\n    'LinearRegression': {\n        'optimizer': [\n            (optim.SGD, {'lr': [.1, .01]}),\n            (optim.Adam, {'lr': [.1, .01]})\n        ]\n    }\n}\n\nmodels = utils.expand_param_grid(model, grid)\nlen(models)\n
    8\n

    "},{"location":"api/utils/log-method-calls/","title":"log_method_calls","text":"

    A context manager to log method calls.

All method calls will be logged by default. This behavior can be overridden by passing filtering functions.

    "},{"location":"api/utils/log-method-calls/#parameters","title":"Parameters","text":"
    • class_condition

      Type \u2192 typing.Callable[[typing.Any], bool] | None

      Default \u2192 None

      A function which determines if a class should be logged or not.

    • method_condition

      Type \u2192 typing.Callable[[typing.Any], bool] | None

      Default \u2192 None

      A function which determines if a method should be logged or not.

    "},{"location":"api/utils/log-method-calls/#examples","title":"Examples","text":"

    import io\nimport logging\nfrom river import anomaly\nfrom river import compose\nfrom river import datasets\nfrom river import preprocessing\nfrom river import utils\n\nmodel = compose.Pipeline(\n    preprocessing.MinMaxScaler(),\n    anomaly.HalfSpaceTrees(seed=42)\n)\n\nclass_condition = lambda x: x.__class__.__name__ in ('MinMaxScaler', 'HalfSpaceTrees')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nlogs = io.StringIO()\nsh = logging.StreamHandler(logs)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\n\nwith utils.log_method_calls(class_condition):\n    for x, y in datasets.CreditCard().take(1):\n        score = model.score_one(x)\n        model = model.learn_one(x)\n\nprint(logs.getvalue())\n
    MinMaxScaler.transform_one\nHalfSpaceTrees.score_one\nMinMaxScaler.learn_one\nMinMaxScaler.transform_one\nHalfSpaceTrees.learn_one\n

    logs.close()\n
    "},{"location":"api/utils/numpy2dict/","title":"numpy2dict","text":"

    Convert a numpy array to a dictionary.

    "},{"location":"api/utils/numpy2dict/#parameters","title":"Parameters","text":"
    • data

      Type \u2192 np.ndarray

A one-dimensional numpy.array.

    "},{"location":"api/utils/numpy2dict/#examples","title":"Examples","text":"

    import numpy as np\nfrom river.utils import numpy2dict\nnumpy2dict(np.array([1.0, 2.0, 3.0]))\n
    {0: 1.0, 1: 2.0, 2: 3.0}\n

    "},{"location":"api/utils/math/argmax/","title":"argmax","text":"

    Argmax function.

    "},{"location":"api/utils/math/argmax/#parameters","title":"Parameters","text":"
    • lst

      Type \u2192 list

    "},{"location":"api/utils/math/chain-dot/","title":"chain_dot","text":"

    Returns the dot product of multiple vectors represented as dicts.

    "},{"location":"api/utils/math/chain-dot/#parameters","title":"Parameters","text":"
    • xs
    "},{"location":"api/utils/math/chain-dot/#examples","title":"Examples","text":"

    from river import utils\n\nx = {'x0': 1, 'x1': 2, 'x2': 1}\ny = {'x1': 21, 'x2': 3}\nz = {'x1': 2, 'x2': 1 / 3}\n\nutils.math.chain_dot(x, y, z)\n
    85.0\n

    "},{"location":"api/utils/math/clamp/","title":"clamp","text":"

    Clamp a number.

    This is a synonym of clipping.

    "},{"location":"api/utils/math/clamp/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

    • minimum

      Default \u2192 0.0

    • maximum

      Default \u2192 1.0

    "},{"location":"api/utils/math/dot/","title":"dot","text":"

    Returns the dot product of two vectors represented as dicts.

    "},{"location":"api/utils/math/dot/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 dict

    • y

      Type \u2192 dict

    "},{"location":"api/utils/math/dot/#examples","title":"Examples","text":"

    from river import utils\n\nx = {'x0': 1, 'x1': 2}\ny = {'x1': 21, 'x2': 3}\n\nutils.math.dot(x, y)\n
    42\n

    "},{"location":"api/utils/math/dotvecmat/","title":"dotvecmat","text":"

Vector times matrix from the left side, i.e. \(x^\mathsf{T} A\).

    "},{"location":"api/utils/math/dotvecmat/#parameters","title":"Parameters","text":"
    • x

    • A

    "},{"location":"api/utils/math/dotvecmat/#examples","title":"Examples","text":"

    from river import utils\n\nx = {0: 4, 1: 5}\n\nA = {\n    (0, 0): 0, (0, 1): 1,\n    (1, 0): 2, (1, 1): 3\n}\n\nC = utils.math.dotvecmat(x, A)\nprint(C)\n
    {0: 10.0, 1: 19.0}\n

    "},{"location":"api/utils/math/log-sum-2-exp/","title":"log_sum_2_exp","text":"

    Computation of log( (e^a + e^b) / 2) in an overflow-proof way

    "},{"location":"api/utils/math/log-sum-2-exp/#parameters","title":"Parameters","text":"
    • a

      Type \u2192 float

      First number

    • b

      Type \u2192 float

      Second number
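With \(m = \max(a, b)\), the computation presumably follows the standard overflow-proof rearrangement \(\log\frac{e^a + e^b}{2} = m + \log\frac{e^{a-m} + e^{b-m}}{2}\): the largest exponent is factored out, so neither exponential can overflow.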

    "},{"location":"api/utils/math/matmul2d/","title":"matmul2d","text":"

    Multiplication for 2D matrices.

    "},{"location":"api/utils/math/matmul2d/#parameters","title":"Parameters","text":"
    • A

    • B

    "},{"location":"api/utils/math/matmul2d/#examples","title":"Examples","text":"

    import pprint\nfrom river import utils\n\nA = {\n    (0, 0): 2, (0, 1): 0, (0, 2): 4,\n    (1, 0): 5, (1, 1): 6, (1, 2): 0\n}\n\nB = {\n    (0, 0): 1, (0, 1): 1, (0, 2): 0, (0, 3): 0,\n    (1, 0): 2, (1, 1): 0, (1, 2): 1, (1, 3): 3,\n    (2, 0): 4, (2, 1): 0, (2, 2): 0, (2, 3): 0\n}\n\nC = utils.math.matmul2d(A, B)\npprint.pprint(C)\n
    {(0, 0): 18.0,\n    (0, 1): 2.0,\n    (0, 2): 0.0,\n    (0, 3): 0.0,\n    (1, 0): 17.0,\n    (1, 1): 5.0,\n    (1, 2): 6.0,\n    (1, 3): 18.0}\n

    "},{"location":"api/utils/math/minkowski-distance/","title":"minkowski_distance","text":"

    Minkowski distance.

    "},{"location":"api/utils/math/minkowski-distance/#parameters","title":"Parameters","text":"
    • a

      Type \u2192 dict

    • b

      Type \u2192 dict

    • p

      Type \u2192 int

      Parameter for the Minkowski distance. When p=1, this is equivalent to using the Manhattan distance. When p=2, this is equivalent to using the Euclidean distance.
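For reference, the Minkowski distance is defined as \(d(a, b) = \left(\sum_k |a_k - b_k|^p\right)^{1/p}\), the sum presumably running over the union of the two dictionaries' keys with missing keys counting as 0; p=1 reduces this to the Manhattan distance and p=2 to the Euclidean distance.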

    "},{"location":"api/utils/math/norm/","title":"norm","text":"

Compute the norm of a dictionary's values.

    "},{"location":"api/utils/math/norm/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 dict

    • order

      Default \u2192 None

    "},{"location":"api/utils/math/outer/","title":"outer","text":"

    Outer-product between two vectors.

    "},{"location":"api/utils/math/outer/#parameters","title":"Parameters","text":"
    • u

      Type \u2192 dict

    • v

      Type \u2192 dict

    "},{"location":"api/utils/math/outer/#examples","title":"Examples","text":"

    import pprint\nfrom river import utils\n\nu = dict(enumerate((1, 2, 3)))\nv = dict(enumerate((2, 4, 8)))\n\nuTv = utils.math.outer(u, v)\npprint.pprint(uTv)\n
    {(0, 0): 2,\n    (0, 1): 4,\n    (0, 2): 8,\n    (1, 0): 4,\n    (1, 1): 8,\n    (1, 2): 16,\n    (2, 0): 6,\n    (2, 1): 12,\n    (2, 2): 24}\n

    "},{"location":"api/utils/math/prod/","title":"prod","text":"

    Product function.

    "},{"location":"api/utils/math/prod/#parameters","title":"Parameters","text":"
    • iterable
    "},{"location":"api/utils/math/sherman-morrison/","title":"sherman_morrison","text":"

    Sherman-Morrison formula.

    This is an inplace function.

    "},{"location":"api/utils/math/sherman-morrison/#parameters","title":"Parameters","text":"
    • A

      Type \u2192 np.ndarray

    • u

      Type \u2192 np.ndarray

    • v

      Type \u2192 np.ndarray
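For reference, the identity reads \((A + uv^\mathsf{T})^{-1} = A^{-1} - \frac{A^{-1}uv^\mathsf{T}A^{-1}}{1 + v^\mathsf{T}A^{-1}u}\): a rank-one update to a matrix only requires a cheap correction of its existing inverse (here applied in place, with A presumably holding the current inverse).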

    1. Fast rank-one updates to matrix inverse? \u2014 Tim Vieira \u21a9

    "},{"location":"api/utils/math/sigmoid/","title":"sigmoid","text":"

    Sigmoid function.

    "},{"location":"api/utils/math/sigmoid/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

    "},{"location":"api/utils/math/sign/","title":"sign","text":"

    Sign function.

    "},{"location":"api/utils/math/sign/#parameters","title":"Parameters","text":"
    • x

      Type \u2192 float

    "},{"location":"api/utils/math/softmax/","title":"softmax","text":"

    Normalizes a dictionary of predicted probabilities, in-place.

    "},{"location":"api/utils/math/softmax/#parameters","title":"Parameters","text":"
    • y_pred

      Type \u2192 dict

    "},{"location":"api/utils/math/woodbury-matrix/","title":"woodbury_matrix","text":"

    Woodbury matrix identity.

    This is an inplace function.

    "},{"location":"api/utils/math/woodbury-matrix/#parameters","title":"Parameters","text":"
    • A

      Type \u2192 np.ndarray

    • U

      Type \u2192 np.ndarray

    • V

      Type \u2192 np.ndarray
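For reference, the underlying identity (with the middle matrix taken as the identity, matching the A, U, V signature above) is \((A + UV)^{-1} = A^{-1} - A^{-1}U\left(I + VA^{-1}U\right)^{-1}VA^{-1}\), the rank-\(k\) generalization of the Sherman-Morrison formula.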

    1. Matrix inverse mini-batch updates \u2014 Max Halford \u21a9

    "},{"location":"api/utils/norm/normalize-values-in-dict/","title":"normalize_values_in_dict","text":"

    Normalize the values in a dictionary using the given factor.

    For each element in the dictionary, applies value/factor.

    "},{"location":"api/utils/norm/normalize-values-in-dict/#parameters","title":"Parameters","text":"
    • dictionary

      Dictionary to normalize.

    • factor

      Default \u2192 None

      Normalization factor value. If not set, use the sum of values.

    • inplace

      Default \u2192 True

      If True, perform operation in-place

    • raise_error

      Default \u2192 False

In case the normalization factor is either 0 or None: - True: raise an error. - False: return the dictionary gracefully (a copy of it, if inplace=False).
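As a quick illustration (a hedged sketch; the import path is assumed from this page's location):

from river.utils.norm import normalize_values_in_dict

# With no factor given, the sum of the values (here 4.0) is used.
normalize_values_in_dict({'a': 1.0, 'b': 3.0}, inplace=False)
# {'a': 0.25, 'b': 0.75}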

    "},{"location":"api/utils/norm/scale-values-in-dict/","title":"scale_values_in_dict","text":"

    Scale the values in a dictionary.

    For each element in the dictionary, applies value * multiplier.

    "},{"location":"api/utils/norm/scale-values-in-dict/#parameters","title":"Parameters","text":"
    • dictionary

      Dictionary to scale.

    • multiplier

      Scaling value.

    • inplace

      Default \u2192 True

      If True, perform operation in-place
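Similarly (same import-path assumption as above):

from river.utils.norm import scale_values_in_dict

scale_values_in_dict({'a': 1.0, 'b': 3.0}, multiplier=2, inplace=False)
# {'a': 2.0, 'b': 6.0}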

    "},{"location":"api/utils/pretty/humanize-bytes/","title":"humanize_bytes","text":"

    Returns a human-friendly byte size.

    "},{"location":"api/utils/pretty/humanize-bytes/#parameters","title":"Parameters","text":"
    • n_bytes

      Type \u2192 int

    "},{"location":"api/utils/pretty/print-table/","title":"print_table","text":"

    Pretty-prints a table.

    "},{"location":"api/utils/pretty/print-table/#parameters","title":"Parameters","text":"
    • headers

      Type \u2192 list[str]

      The column names.

    • columns

      Type \u2192 list[list[str]]

      The column values.

    • order

      Type \u2192 list[int] | None

      Default \u2192 None

Order in which to print the column values. Defaults to the order in which the values are given.

    "},{"location":"api/utils/random/exponential/","title":"exponential","text":"

    Sample a random value from an exponential distribution.

    "},{"location":"api/utils/random/exponential/#parameters","title":"Parameters","text":"
    • rate

      Type \u2192 float

      Default \u2192 1.0

    • rng

      Default \u2192 random (the standard library random module)

    1. Wikipedia article \u21a9
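    One standard way to draw such a sample is inverse-CDF sampling, sketched below; whether river uses this exact route (the standard library also offers `random.expovariate`) is not asserted.

    ```python
    import math
    import random

    def exponential_sample(rate: float = 1.0, rng=random) -> float:
        # If U ~ Uniform[0, 1), then -ln(1 - U) / rate is exponentially
        # distributed with the given rate.
        return -math.log(1.0 - rng.random()) / rate

    samples = [exponential_sample(rate=2.0) for _ in range(100_000)]
    print(sum(samples) / len(samples))  # ~ 1 / rate = 0.5
    ```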

    "},{"location":"api/utils/random/poisson/","title":"poisson","text":"

    Sample a random value from a Poisson distribution.

    "},{"location":"api/utils/random/poisson/#parameters","title":"Parameters","text":"
    • rate

      Type \u2192 float

    • rng

      Default \u2192 random (the standard library random module)

    1. Wikipedia article \u21a9
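    For intuition, here is Knuth's classic sampling method as a sketch (not necessarily river's implementation): multiply uniform draws until the running product falls below e^(-rate). The loop runs O(rate) times, so this suits small rates.

    ```python
    import math
    import random

    def poisson_sample(rate: float, rng=random) -> int:
        threshold = math.exp(-rate)
        count, product = 0, 1.0
        while True:
            product *= rng.random()  # fold in one more uniform draw
            if product <= threshold:
                return count
            count += 1

    samples = [poisson_sample(3.0) for _ in range(100_000)]
    print(sum(samples) / len(samples))  # ~ rate = 3.0
    ```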

    "},{"location":"benchmarks/Binary%20classification/","title":"Binary classification","text":"TableChart Model Dataset Accuracy F1 Memory in Mb Time in s ADWIN Bagging Bananas 0.625967 0.448218 0.416215 147.786 ADWIN Bagging Elec2 0.823285 0.777237 0.733135 1359.96 ADWIN Bagging Phishing 0.893515 0.879201 1.34233 106.016 ADWIN Bagging SMTP 0.999748 0.368421 1.09872 1278.41 ALMA Bananas 0.506415 0.482595 0.0029211 11.8441 ALMA Elec2 0.906402 0.889756 0.00435829 129.979 ALMA Phishing 0.8264 0.811795 0.0045805 4.33824 ALMA SMTP 0.764971 0.00178548 0.00309372 202.144 AdaBoost Bananas 0.677864 0.645041 0.468451 140.518 AdaBoost Elec2 0.875119 0.851923 14.8672 1611.8 AdaBoost Phishing 0.878303 0.863555 0.899108 53.255 AdaBoost SMTP 0.999622 0.526316 1.46643 848.066 Adaptive Random Forest Bananas 0.88696 0.871542 13.8454 307.673 Adaptive Random Forest Elec2 0.87662 0.851959 20.3554 2086.72 Adaptive Random Forest Phishing 0.908727 0.896926 3.82644 123.578 Adaptive Random Forest SMTP 0.999811 0.653846 1.32656 1588.86 Bagging Bananas 0.634082 0.459437 0.722435 194.332 Bagging Elec2 0.841939 0.804093 3.20333 2236.85 Bagging Phishing 0.893515 0.879201 1.42051 103.926 Bagging SMTP 0.999748 0.368421 1.36508 1929.49 Hoeffding Adaptive Tree Bananas 0.616531 0.42825 0.0624046 19.3398 Hoeffding Adaptive Tree Elec2 0.828672 0.795392 0.399325 382.504 Hoeffding Adaptive Tree Phishing 0.874299 0.856095 0.144985 12.0149 Hoeffding Adaptive Tree SMTP 0.999548 0.358209 0.137261 255.925 Hoeffding Tree Bananas 0.642197 0.503405 0.0602674 15.6935 Hoeffding Tree Elec2 0.796993 0.759154 1.18787 137.818 Hoeffding Tree Phishing 0.879904 0.860595 0.134742 5.7113 Hoeffding Tree SMTP 0.999622 0.419355 0.10326 230.368 Leveraging Bagging Bananas 0.828269 0.802603 3.31306 301.152 Leveraging Bagging Elec2 0.892382 0.871457 4.89464 4013.28 Leveraging Bagging Phishing 0.895116 0.878366 3.93267 254.561 Leveraging Bagging SMTP 0.999779 0.553191 1.32725 3620.36 Logistic regression Bananas 0.543019 0.195349 0.00424099 12.8476 Logistic regression Elec2 0.822163 0.777151 0.005373 151.719 Logistic regression Phishing 0.888 0.872263 0.00556469 4.74798 Logistic regression SMTP 0.999769 0.421053 0.00438309 146.608 Naive Bayes Bananas 0.61521 0.413912 0.0140247 21.4993 Naive Bayes Elec2 0.728714 0.603823 0.0510378 183.712 Naive Bayes Phishing 0.884708 0.871429 0.05723 8.45491 Naive Bayes SMTP 0.993484 0.0490798 0.0201406 263.696 Stacking Bananas 0.850349 0.829938 21.2839 421.54 Stacking Elec2 0.896797 0.877621 42.8805 3675.44 Stacking Phishing 0.899119 0.886691 4.31951 185.738 Stacking SMTP 0.99979 0.52381 1.51676 3474.07 Streaming Random Patches Bananas 0.869032 0.850817 12.1355 492.613 Streaming Random Patches Elec2 0.882249 0.859113 58.3436 5128.27 Streaming Random Patches Phishing 0.911129 0.89991 7.09044 222.948 Streaming Random Patches SMTP 0.999832 0.666667 1.41018 3718.08 Voting Bananas 0.830157 0.794989 0.122465 78.6442 Voting Elec2 0.858871 0.820255 1.31479 729.274 Voting Phishing 0.890312 0.876909 0.270739 50.7734 Voting SMTP 0.999685 0.53125 0.17401 1099.49 Vowpal Wabbit logistic regression Bananas 0.551321 0 0.000646591 14.0488 Vowpal Wabbit logistic regression Elec2 0.697439 0.459628 0.000646591 168.403 Vowpal Wabbit logistic regression Phishing 0.7736 0.669778 0.000646591 4.23902 Vowpal Wabbit logistic regression SMTP 0.999695 0.121212 0.000646591 218.564 [baseline] Last Class Bananas 0.50953 0.452957 0.000510216 2.21764 [baseline] Last Class Elec2 0.853352 0.827316 0.000510216 26.8887 [baseline] Last Class Phishing 
0.515612 0.447489 0.000510216 1.7058 [baseline] Last Class SMTP 0.999601 0.366667 0.000510216 88.1401 k-Nearest Neighbors Bananas 0.848462 0.827423 0.0418806 60.5128 k-Nearest Neighbors Elec2 0.884435 0.862904 0.0689526 421.994 k-Nearest Neighbors Phishing 0.867094 0.847985 0.0714331 14.7191 k-Nearest Neighbors SMTP 0.999853 0.740741 0.0443382 606.731 sklearn SGDClassifier Bananas 0.546604 0.205094 0.00549507 83.2497 sklearn SGDClassifier Elec2 0.819051 0.772854 0.00667286 497.166 sklearn SGDClassifier Phishing 0.8888 0.875336 0.00687218 22.1178 sklearn SGDClassifier SMTP 0.999748 0.4 0.0056448 960.794


    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.490566037735849, \"F1\": 0.325, \"Memory in Mb\": 0.0041875839233398, \"Time in s\": 0.013756 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5141509433962265, \"F1\": 0.3757575757575758, \"Memory in Mb\": 0.0041875839233398, \"Time in s\": 0.038575 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5188679245283019, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.073067 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165094339622641, \"F1\": 0.3952802359882006, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.1170739999999999 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5320754716981132, \"F1\": 0.3575129533678756, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.1709859999999999 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.2348859999999999 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5525606469002695, \"F1\": 0.2995780590717299, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.308749 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5518867924528302, \"F1\": 0.2720306513409961, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.392516 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5545073375262054, \"F1\": 0.2504409171075837, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.486674 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5613207547169812, \"F1\": 0.2339373970345963, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.590985 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5600343053173242, \"F1\": 0.216793893129771, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.705545 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5605345911949685, \"F1\": 0.2137834036568213, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.830071 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5638606676342526, \"F1\": 0.2018592297476759, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 0.964451 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5640161725067385, \"F1\": 0.1902377972465581, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.108635 }, { \"step\": 1590, 
\"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5641509433962264, \"F1\": 0.1798816568047337, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.262826 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5654481132075472, \"F1\": 0.1728395061728395, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.426847 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5621531631520533, \"F1\": 0.165079365079365, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.600621 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5581761006289309, \"F1\": 0.1628599801390268, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.784431 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.551142005958292, \"F1\": 0.1614100185528756, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 1.978045 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5490566037735849, \"F1\": 0.1643356643356643, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.1815 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5480682839173405, \"F1\": 0.1767594108019639, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.394877 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5480274442538593, \"F1\": 0.1929555895865237, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.6182670000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467596390484003, \"F1\": 0.1963636363636363, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 2.8514960000000005 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.547562893081761, \"F1\": 0.2132604237867396, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.0947110000000007 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5449056603773584, \"F1\": 0.2229381443298969, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.3477670000000006 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5391872278664731, \"F1\": 0.2256097560975609, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.6106280000000006 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5387840670859538, \"F1\": 0.2271662763466042, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 3.8834530000000007 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5407681940700808, \"F1\": 0.2233618233618233, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.166078000000001 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Logistic 
regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400130123617437, \"F1\": 0.2187845303867403, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.4584340000000005 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5433962264150943, \"F1\": 0.2176724137931034, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 4.760795000000001 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5447352404138771, \"F1\": 0.213459516298633, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.072864000000001 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5436320754716981, \"F1\": 0.210204081632653, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.3947970000000005 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5454545454545454, \"F1\": 0.2057942057942058, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 5.726813000000001 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5477247502774695, \"F1\": 0.2017629774730656, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.068550000000001 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5466307277628032, \"F1\": 0.1967526265520535, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.420104000000001 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5461215932914046, \"F1\": 0.1921641791044775, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 6.781176000000001 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.1882998171846435, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.151771000000001 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5476663356504469, \"F1\": 0.1844225604297224, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.532081000000001 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5478955007256894, \"F1\": 0.1806225339763262, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 7.921987000000001 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.176672384219554, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 8.321417 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5473999079613437, \"F1\": 0.1745698699118757, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 8.730515 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5496406109613656, \"F1\": 0.1799591002044989, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 9.149203 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.5465116279069767, \"F1\": 0.1794362842397777, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 9.577468 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5463121783876501, \"F1\": 0.1861538461538461, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.015743 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5465408805031446, \"F1\": 0.1889763779527558, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.463621 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467596390484003, \"F1\": 0.1892883345561261, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 10.921126 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467683661180249, \"F1\": 0.1958689458689458, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 11.388366 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5446147798742138, \"F1\": 0.1940869565217391, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 11.865171 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5427416249518675, \"F1\": 0.1924515470928255, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 12.351544 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5430188679245282, \"F1\": 0.1953488372093023, \"Memory in Mb\": 0.0042409896850585, \"Time in s\": 12.847639 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7980132450331126, \"F1\": 0.7834319526627219, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.108703 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8134657836644592, \"F1\": 0.7488855869242199, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.336789 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8024282560706402, \"F1\": 0.7300150829562596, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 0.6856869999999999 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8192604856512141, \"F1\": 0.7598093142647598, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 1.150847 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8289183222958058, \"F1\": 0.7613181398213735, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 1.732046 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8226637233259749, \"F1\": 0.7528205128205128, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 2.429434 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8229265216020183, \"F1\": 0.7589611504614724, \"Memory in Mb\": 0.0053730010986328, \"Time in 
s\": 3.242126 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8261589403973509, \"F1\": 0.7617246596066566, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 4.168583 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8318616629874908, \"F1\": 0.7833096254148886, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 5.210413 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8375275938189846, \"F1\": 0.7975797579757975, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 6.367211 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8377483443708609, \"F1\": 0.802008081302804, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 7.639357 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8400478292862399, \"F1\": 0.8089220964729151, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 9.025984 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8433520122261844, \"F1\": 0.8128613449639923, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 10.527624 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8420056764427626, \"F1\": 0.8118309859154929, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 12.142268 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8438557763061074, \"F1\": 0.8167846658608184, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 13.8717 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8447847682119205, \"F1\": 0.8189863234111022, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 15.715708 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8465134398130113, \"F1\": 0.8201734367868553, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 17.682764000000002 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8412435614422369, \"F1\": 0.8128388635870744, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 19.778485000000003 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397815731381434, \"F1\": 0.8070519098922625, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 22.000603000000005 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419977924944813, \"F1\": 0.8099316205271195, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 24.347790000000003 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8451592557552823, \"F1\": 0.8116368286445013, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 26.819926 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Logistic 
regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8428657435279951, \"F1\": 0.8098129706096673, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 29.418034 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8394279681351378, \"F1\": 0.805736182071528, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 32.142589 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8403237674760854, \"F1\": 0.8037087290818633, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 34.992653000000004 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8395143487858719, \"F1\": 0.800963697092482, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 37.96833 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8357530989981321, \"F1\": 0.7954965907288969, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 41.070336000000005 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8330880549423596, \"F1\": 0.7914815382258312, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 44.296124000000006 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298643960895616, \"F1\": 0.787326303340889, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 47.644214000000005 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8304788003349318, \"F1\": 0.7877834953306653, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 51.11458400000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8309050772626931, \"F1\": 0.789000091818933, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 54.70630400000001 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8277433596809799, \"F1\": 0.7844028520499109, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 58.42138600000001 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8270557395143487, \"F1\": 0.782037906451052, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 62.25746200000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8260753227640645, \"F1\": 0.7809050307575629, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 66.21287400000001 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8259316971821842, \"F1\": 0.7798127463863337, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 70.29091000000001 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8213181961526332, \"F1\": 0.7731603811353991, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 74.48714300000002 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8188925680647535, \"F1\": 0.7700393195001364, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 78.80153600000001 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169261977208997, \"F1\": 0.7682314286793308, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 83.23605800000001 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8144243057976066, \"F1\": 0.764807656911467, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 87.78961800000002 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8142299201901851, \"F1\": 0.7628098576280986, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 92.463459 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8155077262693157, \"F1\": 0.7630254483589707, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 97.25864300000002 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8151887148010553, \"F1\": 0.7614745839268963, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 102.17619800000004 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8151739724587407, \"F1\": 0.7609855564995752, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 107.21554300000004 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8162636685661482, \"F1\": 0.7631526702402223, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 112.37010800000004 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169526389725065, \"F1\": 0.7662192035369877, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 117.64186300000004 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8186902133922002, \"F1\": 0.7707480461481205, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 123.03144600000005 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8201842787215664, \"F1\": 0.7745623007039286, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 128.53961200000003 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8212155370813959, \"F1\": 0.7763841973858129, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 134.16514200000003 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8217209345106696, \"F1\": 0.7773086313370673, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 139.90254000000004 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8211920529801324, \"F1\": 0.7754328391988233, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 145.75278900000004 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8221633554083885, \"F1\": 0.7771507607192254, \"Memory in Mb\": 0.0053730010986328, \"Time in s\": 151.71939900000004 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.6896551724137931, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.005171 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.78, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.014932 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8157894736842105, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.027624 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8163265306122449, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.045128 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.808, \"F1\": 0.8032786885245902, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.065471 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8157894736842104, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.088498 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8228571428571428, \"F1\": 0.8143712574850299, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.1141639999999999 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8105263157894737, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.143232 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8177777777777778, \"F1\": 0.8038277511961723, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.175843 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.824, \"F1\": 0.811965811965812, \"Memory in Mb\": 0.005324363708496, \"Time in s\": 0.212135 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8254545454545454, \"F1\": 0.8125, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.2519909999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8366666666666667, \"F1\": 0.8205128205128205, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.295434 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8430769230769231, \"F1\": 0.8222996515679442, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.3424399999999999 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8542857142857143, \"F1\": 0.8316831683168316, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.3929919999999999 }, { \"step\": 375, \"track\": \"Binary classification\", 
\"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8506666666666667, \"F1\": 0.825, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.4472389999999999 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8525, \"F1\": 0.8249258160237388, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.5051399999999999 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8588235294117647, \"F1\": 0.8285714285714286, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.5668209999999999 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8622222222222222, \"F1\": 0.8306010928961749, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.6322009999999999 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8589473684210527, \"F1\": 0.8277634961439589, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.7013439999999999 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8325358851674641, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.7743169999999998 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590476190476191, \"F1\": 0.827906976744186, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.8510559999999998 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8300220750551875, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 0.9315169999999998 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8626086956521739, \"F1\": 0.8329809725158562, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.015688 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8666666666666667, \"F1\": 0.8353909465020577, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.103615 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8688, \"F1\": 0.8346774193548386, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.195378 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8723076923076923, \"F1\": 0.8413001912045889, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.291006 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8725925925925926, \"F1\": 0.8447653429602888, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.390501 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8771428571428571, \"F1\": 0.8485915492957746, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.494099 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786206896551724, \"F1\": 
0.8533333333333334, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.601701 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8557692307692307, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.713159 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8812903225806452, \"F1\": 0.8566978193146417, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.828506 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88125, \"F1\": 0.8584202682563338, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 1.947668 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8812121212121212, \"F1\": 0.8595988538681948, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.0707090000000004 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8823529411764706, \"F1\": 0.8603351955307262, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.197589 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8857142857142857, \"F1\": 0.8637602179836512, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.3283080000000003 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8855555555555555, \"F1\": 0.8632138114209827, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.462826 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875675675675676, \"F1\": 0.867007672634271, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.6011680000000004 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8863157894736842, \"F1\": 0.8669950738916257, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.7434260000000004 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871794871794871, \"F1\": 0.8677884615384616, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 2.889565 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888, \"F1\": 0.8688524590163934, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.039628 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878048780487805, \"F1\": 0.8691695108077361, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.19355 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8895238095238095, \"F1\": 0.8716814159292035, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.351259 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8883720930232558, \"F1\": 0.8715203426124196, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.512691 }, { \"step\": 1100, \"track\": 
\"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8735632183908045, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.677881 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906666666666667, \"F1\": 0.8753799392097265, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 3.846795 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8904347826086957, \"F1\": 0.8750000000000001, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.0194600000000005 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893617021276595, \"F1\": 0.8735408560311284, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.195879000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8740458015267174, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.376099000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906122448979592, \"F1\": 0.874766355140187, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.560123000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888, \"F1\": 0.8722627737226277, \"Memory in Mb\": 0.0055646896362304, \"Time in s\": 4.747978000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.201733 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.541161 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 0.989434 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 1.548242 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 2.214419 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 2.987891 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 3.868043 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996715712033633, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 4.854806 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", 
\"Accuracy\": 0.9997080632918784, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 5.948104 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 7.14776 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999761142693355, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 8.453778 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997810474689088, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 9.865972 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978899713004, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 11.384388 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999774791682306, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 13.008482 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 14.738381 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999802942722018, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 16.574147999999997 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999814534326605, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 18.52197 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 20.589921 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998340570290676, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 22.77409 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423541776142, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 25.073776 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 27.489115 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999856685616013, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 30.019976 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998629166761864, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 32.666409 }, { \"step\": 45672, 
\"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686284813452, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 35.428179 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738833420916, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 38.305364 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787339827804, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 41.297895 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998443004223352, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 44.405542 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 47.6267 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999855038324243, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 50.959272 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997022245577158, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 54.404277 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118302171444, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 57.960817 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208355228586, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 61.629022 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999697447411583, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 65.409349 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063460171248, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 69.30026500000001 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997147361309212, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 73.30473800000001 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934664564724, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 77.42011300000001 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999701751146838, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 81.64355900000001 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9997095998008684, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 85.97794200000001 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997170459598204, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 90.42391200000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999724119810825, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 94.979812 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997308485959268, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 99.646713 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 104.425102 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997433672658838, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 109.315273 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997491998280228, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 114.3171 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997547731651778, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 119.427298 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999760104183326, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 124.64502 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997540277948592, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 129.97108 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997591522157996, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 135.407078 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997640674767015, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 140.95343100000002 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997687861271676, \"F1\": 0.4210526315789474, \"Memory in Mb\": 0.0043830871582031, \"Time in s\": 146.60779900000003 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.5242718446601942, \"Memory in Mb\": 0.0028944015502929, \"Time in s\": 0.022931 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5330188679245284, \"F1\": 0.5217391304347825, \"Memory in Mb\": 0.0028944015502929, \"Time in s\": 0.057388 }, { \"step\": 318, 
\"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5188679245283019, \"F1\": 0.5173501577287066, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.098467 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5330188679245284, \"F1\": 0.5330188679245282, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.14786 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5207547169811321, \"F1\": 0.5115384615384615, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.2044709999999999 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.5303514376996804, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.268 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.522911051212938, \"F1\": 0.512396694214876, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.339749 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5235849056603774, \"F1\": 0.5061124694376529, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.420031 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5157232704402516, \"F1\": 0.5, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.5084810000000001 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5160377358490567, \"F1\": 0.4975514201762978, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.605006 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5154373927958834, \"F1\": 0.495985727029438, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.7097530000000001 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165094339622641, \"F1\": 0.4979591836734694, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.823412 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5195936139332366, \"F1\": 0.4977238239757208, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 0.94616 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5195417789757413, \"F1\": 0.4968242766407903, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.077952 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5226415094339623, \"F1\": 0.4983476536682089, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.218949 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5194575471698113, \"F1\": 0.4947303161810291, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.368908 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205327413984462, \"F1\": 0.4965034965034965, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.527947 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.5193920335429769, \"F1\": 0.4964305326743548, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.696168 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.519364448857994, \"F1\": 0.4989648033126293, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 1.873583 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5174528301886793, \"F1\": 0.4997555012224939, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.060039 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5197663971248877, \"F1\": 0.5002337540906966, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.255465 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5175814751286449, \"F1\": 0.4975435462259938, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.460073 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5176374077112387, \"F1\": 0.4957118353344769, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.67358 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5196540880503144, \"F1\": 0.5008169934640523, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 2.896329 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.520377358490566, \"F1\": 0.5037094884810621, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.127217 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.521044992743106, \"F1\": 0.5041322314049587, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.366825 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5213137665967854, \"F1\": 0.5032632342277013, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.615642 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5175202156334232, \"F1\": 0.4985994397759103, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 3.87375 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5152895250487963, \"F1\": 0.4969615124915597, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.141236999999999 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5132075471698113, \"F1\": 0.4931237721021611, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.418262 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5130858186244674, \"F1\": 0.4927076727964489, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.703974 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5103183962264151, \"F1\": 0.490959239963224, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 4.999187999999999 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5091480846197828, \"F1\": 0.4891401368640284, \"Memory in Mb\": 0.0029211044311523, \"Time 
in s\": 5.303419999999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5097114317425083, \"F1\": 0.4876775877065816, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 5.616715999999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5118598382749326, \"F1\": 0.4908630868709586, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 5.938947999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510482180293501, \"F1\": 0.4893384363039912, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.270639999999999 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.50790413054564, \"F1\": 0.485881726158764, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.611250999999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.506454816285998, \"F1\": 0.4844398340248962, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 6.960905 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5050798258345428, \"F1\": 0.4828109201213346, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 7.319711 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5068396226415094, \"F1\": 0.4848484848484848, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 7.686943999999999 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5080533824206167, \"F1\": 0.4858104858104858, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.062541999999999 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5080862533692723, \"F1\": 0.4847058823529412, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.446741 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5063624396665204, \"F1\": 0.4837081229921982, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 8.840034999999999 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5051457975986278, \"F1\": 0.4829749103942652, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 9.242387 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5048218029350104, \"F1\": 0.482017543859649, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 9.653845 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5036915504511895, \"F1\": 0.4802405498281787, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.074018 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5038137294259334, \"F1\": 0.4811083123425693, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.503025 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5029481132075472, \"F1\": 0.4799506477483035, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 10.941391 }, { \"step\": 
5194, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5040431266846361, \"F1\": 0.4810636583400483, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 11.388345 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5064150943396226, \"F1\": 0.4825949367088608, \"Memory in Mb\": 0.0029211044311523, \"Time in s\": 11.844057 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9072847682119204, \"F1\": 0.90561797752809, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.085634 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9166666666666666, \"F1\": 0.8967874231032126, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.273724 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9175864606328182, \"F1\": 0.898458748866727, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.563875 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9268763796909492, \"F1\": 0.9098945936756204, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 0.954455 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9271523178807948, \"F1\": 0.9076664801343034, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 1.4444780000000002 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9269683590875644, \"F1\": 0.907437631149452, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 2.035788 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9274676758120468, \"F1\": 0.9089108910891088, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 2.724631 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9253587196467992, \"F1\": 0.9064499394777796, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 3.51195 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9250674515575178, \"F1\": 0.9098687121994394, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 4.397951 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9264900662251656, \"F1\": 0.9133714880332986, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 5.380316 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9292594822396149, \"F1\": 0.9181279758448496, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 6.461929 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9312913907284768, \"F1\": 0.9216077237905342, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 7.640261 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9313126167430804, \"F1\": 0.9217525872908404, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 8.918386 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.9289656259854936, \"F1\": 0.9190694332165634, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 10.293447 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9297277409860192, \"F1\": 0.9208978712830284, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 11.76578 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9304635761589404, \"F1\": 0.9221381121581956, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 13.33625 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9307882093234644, \"F1\": 0.922201138519924, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 15.00584 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9292985038018152, \"F1\": 0.9202572792032644, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 16.773224 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.927907517137214, \"F1\": 0.9175579618680662, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 18.640941 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9265452538631348, \"F1\": 0.915945689927376, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 20.621169 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9265216020182908, \"F1\": 0.9150771473697, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 22.710024 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9262492474413004, \"F1\": 0.915526950925181, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 24.907693 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9231692100969384, \"F1\": 0.9122114382848056, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 27.214713 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9224613686534217, \"F1\": 0.910137511992325, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 29.631112 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9216777041942604, \"F1\": 0.9086978898610396, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 32.155455 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9186194600101883, \"F1\": 0.9050096625538872, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 34.791879 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9172594227781866, \"F1\": 0.9028324531925108, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 37.534909 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9144591611479028, \"F1\": 0.8997134670487106, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 40.38939 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9142117682880414, \"F1\": 0.899213020926489, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 43.350156 }, { 
\"step\": 27180, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9137969094922738, \"F1\": 0.8990477831875565, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 46.420679 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9109876806950082, \"F1\": 0.8954587271054613, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 49.601356 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9101131346578366, \"F1\": 0.8940478126524638, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 52.888972 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9094588266773698, \"F1\": 0.8931264558411306, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 56.282618 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082911310219453, \"F1\": 0.8912666948924213, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 59.78184 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9061810154525386, \"F1\": 0.8888307611823175, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 63.389233 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9052612214863872, \"F1\": 0.8878891227051738, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 67.10073700000001 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9050176003818388, \"F1\": 0.887665819926616, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 70.91761400000001 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9050482165679098, \"F1\": 0.8877519486316657, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 74.83635300000002 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9045112356370636, \"F1\": 0.8865729846029718, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 78.86287600000001 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9047737306843268, \"F1\": 0.8860115606936415, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 82.99223100000002 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9044850051149518, \"F1\": 0.8853857087479002, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 87.22379500000002 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9042363082098182, \"F1\": 0.884573962622743, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 91.56013600000004 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9043842086349402, \"F1\": 0.8849918182098861, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 96.00151600000002 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.904901665663255, \"F1\": 0.8863302449701658, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 100.54490000000004 }, { \"step\": 40770, \"track\": 
\"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.905494235957812, \"F1\": 0.8878344153008646, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 105.19249400000002 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9060850369517228, \"F1\": 0.8891971464160343, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 109.94207100000004 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9063688882626462, \"F1\": 0.8897796699195533, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 114.79624900000005 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.906686902133922, \"F1\": 0.8902234485743656, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 119.75466700000004 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9062485921520926, \"F1\": 0.8894437656059077, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 124.81683700000002 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Elec2\", \"Accuracy\": 0.906401766004415, \"F1\": 0.8897555902236091, \"Memory in Mb\": 0.0043582916259765, \"Time in s\": 129.97946500000003 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.56, \"F1\": 0.5217391304347826, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.006962 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7, \"F1\": 0.6341463414634146, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.020485 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.72, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.040554 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.73, \"F1\": 0.7157894736842104, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.063947 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.728, \"F1\": 0.7166666666666666, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.089789 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.72, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.118115 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7371428571428571, \"F1\": 0.7261904761904763, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.148737 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.7291666666666666, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.181823 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7288888888888889, \"F1\": 0.7081339712918661, \"Memory in Mb\": 0.0043668746948242, \"Time in s\": 0.217433 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.728, \"F1\": 0.7094017094017095, \"Memory in Mb\": 
0.0043668746948242, \"Time in s\": 0.255636 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7381818181818182, \"F1\": 0.7187499999999999, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.296124 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.717391304347826, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.339387 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7507692307692307, \"F1\": 0.7216494845360825, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.3849 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7571428571428571, \"F1\": 0.7266881028938907, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.432909 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.76, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.483666 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7625, \"F1\": 0.7293447293447294, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.537154 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7623529411764706, \"F1\": 0.7232876712328767, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.593599 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7644444444444445, \"F1\": 0.7239583333333334, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.65326 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7684210526315789, \"F1\": 0.7303921568627451, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.7162679999999999 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.77, \"F1\": 0.735632183908046, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.7828129999999999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7733333333333333, \"F1\": 0.7349665924276169, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.8526399999999998 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7727272727272727, \"F1\": 0.7368421052631579, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 0.92582 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7756521739130435, \"F1\": 0.7404426559356138, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.00229 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7816666666666666, \"F1\": 0.7426326129666011, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.08201 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.776, \"F1\": 0.7338403041825096, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.165182 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.7830769230769231, \"F1\": 0.7450271247739602, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.251548 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7851851851851852, \"F1\": 0.7521367521367521, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.34123 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7914285714285715, \"F1\": 0.7566666666666668, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.434087 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7931034482758621, \"F1\": 0.7626582278481012, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.530377 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7933333333333333, \"F1\": 0.7640791476407914, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.6300339999999998 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7935483870967742, \"F1\": 0.7633136094674556, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.7333069999999997 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.79625, \"F1\": 0.7687943262411348, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.840083 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7951515151515152, \"F1\": 0.7688098495212038, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 1.950315 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7988235294117647, \"F1\": 0.7723035952063916, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.063938 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8034285714285714, \"F1\": 0.7760416666666667, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.180811 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8022222222222222, \"F1\": 0.7752525252525253, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.30125 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8064864864864865, \"F1\": 0.781973203410475, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.424984 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8084210526315789, \"F1\": 0.7863849765258215, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.552135 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8112820512820513, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.682703 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.812, \"F1\": 0.7906458797327395, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 2.8167349999999995 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8156097560975609, \"F1\": 0.7956756756756757, \"Memory in Mb\": 0.0045804977416992, 
\"Time in s\": 2.95405 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8171428571428572, \"F1\": 0.7983193277310925, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.094733 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8167441860465117, \"F1\": 0.7995930824008138, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.238751 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8035714285714286, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.3859479999999995 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8222222222222222, \"F1\": 0.8073217726396917, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.536398 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8234782608695652, \"F1\": 0.8083097261567517, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.690036 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8221276595744681, \"F1\": 0.8070175438596491, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 3.847107 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8241666666666667, \"F1\": 0.8087035358114234, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.007475 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8253061224489796, \"F1\": 0.8099467140319715, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.171155 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8264, \"F1\": 0.8117953165654813, \"Memory in Mb\": 0.0045804977416992, \"Time in s\": 4.338235999999999 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.720966894377299, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 0.171046 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7769311613242249, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 0.510929 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7509196006305833, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 1.019887 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7900683131897005, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 1.685465 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7826589595375723, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 2.527149 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7699246803293046, \"F1\": 0.0, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 3.545039 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7722393213722694, \"F1\": 0.0, \"Memory in Mb\": 
0.0030937194824218, \"Time in s\": 4.73657 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7791644771413557, \"F1\": 0.0041469194312796, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 6.098207 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.783207800548841, \"F1\": 0.004824443848834, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 7.63166 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7891224382553862, \"F1\": 0.0044653932026792, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 9.334478 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7832131084889887, \"F1\": 0.0039508340649692, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 11.212632 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7821422315641969, \"F1\": 0.0036050470658922, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 13.266531 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7877440478596548, \"F1\": 0.0034162080091098, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 15.48839 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.78188574431349, \"F1\": 0.0034299434059338, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 17.885128 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7857418111753371, \"F1\": 0.0032594524119947, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 20.449795 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7871452968996322, \"F1\": 0.0030764497769573, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 23.184561 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7866835646502427, \"F1\": 0.0028897558156335, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 26.090132000000004 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7860979739592456, \"F1\": 0.002722199537226, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 29.168944000000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7771939043615345, \"F1\": 0.0024764735017335, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 32.428274 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7831581713084603, \"F1\": 0.0024175027196905, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 35.852824 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.779496033831294, \"F1\": 0.0022644927536231, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 39.452307 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7831175655663307, \"F1\": 0.0021978021978021, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 43.218835 }, { \"step\": 43769, \"track\": \"Binary classification\", 
\"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7791130708949257, \"F1\": 0.0020644095788604, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 47.160889 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7808066211245402, \"F1\": 0.0019938191606021, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 51.272051 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7799684708355229, \"F1\": 0.001906941266209, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 55.555832 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7778810784591131, \"F1\": 0.0018165304268846, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 60.01382 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7807944570950351, \"F1\": 0.0021263400372109, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 64.63894599999999 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7777193904361535, \"F1\": 0.0020222446916076, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 69.43803199999999 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7785891604906953, \"F1\": 0.0019603038470963, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 74.40731799999999 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7758801891749869, \"F1\": 0.0026502455374542, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 79.551965 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.774159646059702, \"F1\": 0.0025454817698585, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 84.828086 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7746157383079348, \"F1\": 0.0024711098190275, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 90.195459 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7704899759550311, \"F1\": 0.0023534297778085, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 95.659582 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.771274458285679, \"F1\": 0.002292186341266, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 101.21574 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7721942797087306, \"F1\": 0.0022358124547905, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 106.861577 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7705085537455479, \"F1\": 0.0024111675126903, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 112.596175 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7685872945988553, \"F1\": 0.0023267205486162, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 118.42066 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7687999557485411, \"F1\": 
0.0022677090171271, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 124.335619 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7657140547313958, \"F1\": 0.0021806496040399, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 130.34374499999998 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7665002627430373, \"F1\": 0.0021333932180552, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 136.44014199999998 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7657101111210797, \"F1\": 0.0020744622775412, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 142.623664 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7636313590070816, \"F1\": 0.0020073956682514, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 148.896051 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7647777682728617, \"F1\": 0.0019703411801306, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 155.253168 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7652868676252806, \"F1\": 0.0019298156518206, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 161.69571599999998 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7642552694575816, \"F1\": 0.0018787699001285, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 168.224788 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7644680024674998, \"F1\": 0.001839659178931, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 174.840187 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7635312664214399, \"F1\": 0.0018876828692779, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 181.540983 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7650091960063058, \"F1\": 0.0018600325505696, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 188.324265 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7647859984771628, \"F1\": 0.001820415965048, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 195.191916 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"ALMA\", \"dataset\": \"SMTP\", \"Accuracy\": 0.7649710982658959, \"F1\": 0.0017854751595768, \"Memory in Mb\": 0.0030937194824218, \"Time in s\": 202.143655 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.4731182795698925, \"Memory in Mb\": 0.0054683685302734, \"Time in s\": 0.075077 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5424528301886793, \"F1\": 0.4699453551912568, \"Memory in Mb\": 0.0054683685302734, \"Time in s\": 0.21882 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.4878048780487805, \"Memory in 
Mb\": 0.0054950714111328, \"Time in s\": 0.428712 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5212264150943396, \"F1\": 0.4671916010498687, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 0.704487 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5283018867924528, \"F1\": 0.4266055045871559, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.047006 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5251572327044025, \"F1\": 0.388663967611336, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.456112 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5377358490566038, \"F1\": 0.3636363636363636, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 1.9325 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5412735849056604, \"F1\": 0.3395585738539897, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 2.475946 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5450733752620545, \"F1\": 0.3154574132492113, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 3.086975 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5528301886792453, \"F1\": 0.2967359050445103, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 3.764565 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5531732418524872, \"F1\": 0.2793914246196404, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 4.50753 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.2762148337595907, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 5.315441 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5573294629898403, \"F1\": 0.261501210653753, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 6.188685 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5579514824797843, \"F1\": 0.2477064220183486, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 7.126671999999999 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5584905660377358, \"F1\": 0.2352941176470588, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 8.129404999999998 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5601415094339622, \"F1\": 0.2261410788381742, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 9.196331 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5571587125416204, \"F1\": 0.2161100196463654, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 10.327506 }, { \"step\": 1908, \"track\": 
\"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.2116991643454038, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 11.523138 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5501489572989077, \"F1\": 0.201058201058201, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 12.782968 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.1946308724832214, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 14.106907 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550763701707098, \"F1\": 0.2038216560509554, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 15.494886 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5497427101200686, \"F1\": 0.2105263157894736, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 16.946832999999998 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5484003281378179, \"F1\": 0.2118826055833929, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 18.462691 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487421383647799, \"F1\": 0.2264150943396226, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 20.042348 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5464150943396227, \"F1\": 0.2324393358876117, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 21.68579 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5399129172714079, \"F1\": 0.2305825242718446, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 23.392549 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.539832285115304, \"F1\": 0.2311733800350262, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 25.163525000000003 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5414420485175202, \"F1\": 0.2306387789711701, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 26.998099000000003 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400130123617437, \"F1\": 0.2273224043715847, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 28.897093000000005 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5433962264150943, \"F1\": 0.2284803400637619, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 30.860975000000003 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5447352404138771, \"F1\": 0.2248704663212435, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 32.894016 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": 
\"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5448113207547169, \"F1\": 0.2248995983935743, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 34.991593 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5465980560320183, \"F1\": 0.2202556538839724, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 37.153313 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5491120976692564, \"F1\": 0.216867469879518, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 39.379211000000005 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5482479784366577, \"F1\": 0.212406015037594, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 41.66962600000001 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5476939203354297, \"F1\": 0.2075298438934802, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 44.024291000000005 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5486996430392657, \"F1\": 0.2034203420342034, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 46.443344 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5491559086395233, \"F1\": 0.1992945326278659, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 48.926930000000006 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5493468795355588, \"F1\": 0.1952483801295896, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 51.474790000000006 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5485849056603773, \"F1\": 0.1910397295012679, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 54.086862 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487804878048781, \"F1\": 0.1886636326023996, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 56.763353 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509883198562444, \"F1\": 0.1936264622831787, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 59.503949000000006 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5484861781483107, \"F1\": 0.1935736677115987, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 62.30891200000001 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5493138936535163, \"F1\": 0.1977099236641221, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 65.176854 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550314465408805, \"F1\": 0.1999254009697874, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 68.08610900000001 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"sklearn 
SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.550656275635767, \"F1\": 0.2000730193501277, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 71.03653600000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505820955439582, \"F1\": 0.2063098192130449, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 74.02805500000001 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5481525157232704, \"F1\": 0.2042229145032883, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 77.060803 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5463996919522526, \"F1\": 0.2024373730534867, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 80.134657 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5466037735849056, \"F1\": 0.2050942772080714, \"Memory in Mb\": 0.0054950714111328, \"Time in s\": 83.249657 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7991169977924945, \"F1\": 0.7853773584905659, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 0.581912 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8134657836644592, \"F1\": 0.7492581602373888, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 1.751174 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8002207505518764, \"F1\": 0.7256189994946943, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 3.503934 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8187086092715232, \"F1\": 0.7581891792418107, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 5.845535 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8275938189845474, \"F1\": 0.7584287039901021, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 8.772576 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8210080941869021, \"F1\": 0.7495495495495494, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 12.203207 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8221381267738883, \"F1\": 0.7573149741824441, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 16.003328 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8251931567328918, \"F1\": 0.7596281540504647, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 20.176524 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8302673534461614, \"F1\": 0.7805960684844642, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 24.718011 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8363134657836645, \"F1\": 
0.7957018873123021, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 29.627709 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8370459562512542, \"F1\": 0.8009803921568629, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 34.907269 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8392200147167035, \"F1\": 0.8078276165347406, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 40.555269 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422482594668025, \"F1\": 0.8113705583756344, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 46.57328199999999 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8409019236833807, \"F1\": 0.8104096204434422, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 52.95826299999999 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8427520235467255, \"F1\": 0.8153779697624189, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 59.70527599999999 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8438189845474614, \"F1\": 0.8177427145387216, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 66.808032 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.845214907154915, \"F1\": 0.8184310738766185, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 74.254711 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397105714986509, \"F1\": 0.8107990735379271, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 82.035365 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8384454513767864, \"F1\": 0.8052930056710774, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 90.148338 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.840728476821192, \"F1\": 0.8082392026578072, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 98.575089 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.843950383685483, \"F1\": 0.8100083189351762, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 107.31510699999998 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8412101143889223, \"F1\": 0.8075402858011553, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 116.36858499999998 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8373164411171897, \"F1\": 0.8027923211169286, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 125.73531899999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8382082413539367, \"F1\": 0.8007250481477285, \"Memory in Mb\": 
0.0066728591918945, \"Time in s\": 135.41517199999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8376158940397351, \"F1\": 0.7981560750740864, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 145.412659 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8337154015961963, \"F1\": 0.7923888270525256, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 155.725043 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8312893467418854, \"F1\": 0.7886732551589942, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 166.35011 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8278145695364238, \"F1\": 0.7841897233201582, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 177.288636 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8282332343761893, \"F1\": 0.7844073950222137, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 188.539768 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.828513612950699, \"F1\": 0.7853557448768134, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 200.1032 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8253222245958841, \"F1\": 0.7808451710890736, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 211.980006 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8246067880794702, \"F1\": 0.7784410265347915, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 224.170025 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.822830958592548, \"F1\": 0.7765638840848695, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 236.672116 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8227178288533956, \"F1\": 0.775479998355466, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 249.486866 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179754020813623, \"F1\": 0.768545994065282, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 262.615336 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8155506499877361, \"F1\": 0.7653116954045408, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 276.05616200000003 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.813614939442754, \"F1\": 0.7635840774935674, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 289.80868200000003 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8107935401417451, \"F1\": 0.7596842027595366, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 303.873891 }, { \"step\": 
35334, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8109752646176487, \"F1\": 0.758173720989174, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 318.256576 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8122792494481236, \"F1\": 0.7584590804189597, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 332.952142 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8118774565229095, \"F1\": 0.7566852367688023, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 347.962415 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.811783874697782, \"F1\": 0.7561623314721503, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 363.28641 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8127983982750655, \"F1\": 0.7582617919055984, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 378.92486 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8135661248244029, \"F1\": 0.7615350060963869, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 394.877767 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8153544272749571, \"F1\": 0.7661821344266367, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 411.1448869999999 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8169450043190325, \"F1\": 0.7701762313601446, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 427.723092 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179747311070406, \"F1\": 0.7719824669784955, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 444.613374 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8185476453274466, \"F1\": 0.7730057820096079, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 461.816869 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8179258458350227, \"F1\": 0.77092248830948, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 479.334408 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8190507726269316, \"F1\": 0.7728544905367584, \"Memory in Mb\": 0.0066728591918945, \"Time in s\": 497.166197 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.023761 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.78, \"F1\": 0.7659574468085107, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.064622 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8133333333333334, \"F1\": 0.8108108108108109, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.122385 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8125, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.197128 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.808, \"F1\": 0.8, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.288843 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8133333333333334, \"F1\": 0.8133333333333335, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.397551 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8228571428571428, \"F1\": 0.812121212121212, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.523492 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.6664519999999999 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8177777777777778, \"F1\": 0.8019323671497586, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 0.8264199999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.82, \"F1\": 0.8068669527896996, \"Memory in Mb\": 0.0066585540771484, \"Time in s\": 1.003478 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8218181818181818, \"F1\": 0.8078431372549019, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.197523 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8161764705882353, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.4085949999999998 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8430769230769231, \"F1\": 0.8222996515679442, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.6370319999999998 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8485714285714285, \"F1\": 0.8262295081967213, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 1.88249 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.848, \"F1\": 0.8246153846153846, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.144992 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.85, \"F1\": 0.8245614035087719, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.424587 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8541176470588235, \"F1\": 0.8258426966292134, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 2.721266 }, { \"step\": 450, \"track\": \"Binary 
classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577777777777778, \"F1\": 0.8279569892473118, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.035051 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8568421052631578, \"F1\": 0.8282828282828283, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.366223 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.856, \"F1\": 0.8309859154929577, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 3.714529 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8552380952380952, \"F1\": 0.8264840182648402, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.0798760000000005 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86, \"F1\": 0.8336933045356371, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.4622850000000005 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8608695652173913, \"F1\": 0.8347107438016529, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 4.8618310000000005 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.865, \"F1\": 0.8370221327967807, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 5.278527 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8656, \"F1\": 0.8346456692913387, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 5.712664 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8692307692307693, \"F1\": 0.8417132216014899, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 6.163824 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8711111111111111, \"F1\": 0.8471001757469244, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 6.632034 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8757142857142857, \"F1\": 0.8507718696397941, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 7.117376 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8772413793103448, \"F1\": 0.8552845528455284, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 7.619913 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786666666666667, \"F1\": 0.8575899843505477, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 8.139518 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8584474885844748, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 8.676567 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8600583090379008, 
\"Memory in Mb\": 0.0068721771240234, \"Time in s\": 9.230727 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88, \"F1\": 0.8611500701262274, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 9.801975 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8811764705882353, \"F1\": 0.8618331053351573, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 10.390364 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845714285714286, \"F1\": 0.8651535380507342, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 10.99587 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8833333333333333, \"F1\": 0.8634590377113134, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 11.618539 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8854054054054054, \"F1\": 0.8671679197994987, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 12.258516 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8852631578947369, \"F1\": 0.8685162846803377, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 12.91558 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861538461538462, \"F1\": 0.8692579505300353, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 13.589863 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.887, \"F1\": 0.8702640642939151, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 14.281225 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8868292682926829, \"F1\": 0.8705357142857143, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 14.989779 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8885714285714286, \"F1\": 0.8729641693811075, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 15.714934 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8874418604651163, \"F1\": 0.8727655099894849, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 16.456921 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.889090909090909, \"F1\": 0.8747433264887063, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 17.215539 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906666666666667, \"F1\": 0.8776119402985074, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 17.990769 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8904347826086957, \"F1\": 0.8771929824561404, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 18.782632 }, { \"step\": 1175, \"track\": \"Binary 
classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893617021276595, \"F1\": 0.875717017208413, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 19.591204 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89, \"F1\": 0.8761726078799249, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 20.416515 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906122448979592, \"F1\": 0.8768382352941176, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 21.258769 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8888, \"F1\": 0.8753363228699551, \"Memory in Mb\": 0.0068721771240234, \"Time in s\": 22.117832 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.998949027850762, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 1.261861 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474513925381, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 3.784872 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649675950254, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 7.562933 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 12.592241 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 18.508988 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 25.182225000000003 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498611215374, \"F1\": 0.0, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 32.571041 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995401996847084, \"F1\": 0.631578947368421, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 40.604957 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912886086296, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 49.282818 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321597477666, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 58.610725 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 68.59192900000001 }, { \"step\": 22836, 
\"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934664564724, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 79.219372 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997170459598204, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 90.492903 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997222430748, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 102.41066 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997197407602032, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 114.976109 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 128.185954 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997527124354734, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 142.03946100000002 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664506335028, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 156.53684400000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997787427054236, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 171.68202700000003 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898055701524, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 187.47483000000005 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997998148287166, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 203.911748 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999808914154684, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 220.99296 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998172222349152, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 238.718792 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824837975127, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 257.088535 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999831844456122, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 276.101869 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"sklearn 
SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998383119770404, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 295.75838 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998053755279188, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 316.063483 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123264019216, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 337.01301299999994 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998187979053038, \"F1\": 0.6153846153846154, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 358.6068609999999 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996671921527412, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 380.844451 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996779278897496, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 403.726308 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999687992643195, \"F1\": 0.4571428571428571, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 427.25218 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 451.4221749999999 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996754350715588, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 476.235657 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847083552286, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 501.6931359999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99966427278566, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 527.7941669999999 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996733464941556, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 554.5388179999999 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819426390464, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 581.9269989999999 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999690097955994, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 609.9586109999999 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", 
\"Accuracy\": 0.999697845507094, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 638.633216 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052151288722, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 667.951428 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99971223381628, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 697.9132639999999 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189260531107, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 728.5228309999999 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253140973582, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 759.7753099999999 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731418228528, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 791.6709739999999 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4102564102564103, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 824.2098649999999 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997316666853008, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 857.3917109999999 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 891.2164919999999 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426190654928, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 925.683851 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"sklearn SGDClassifier\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477666841827, \"F1\": 0.4, \"Memory in Mb\": 0.0056447982788085, \"Time in s\": 960.794209 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.01481 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5283018867924528, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.041137 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5314465408805031, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.078177 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5400943396226415, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.125909 }, { \"step\": 
530, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5547169811320755, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.184447 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5550314465408805, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.253805 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5660377358490566, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.333961 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5636792452830188, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.425093 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5649895178197065, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.527744 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5707547169811321, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.641554 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5686106346483705, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.7661 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5644654088050315, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.90156 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5682148040638607, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.047827 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5680592991913747, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.204662 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5679245283018868, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3725140000000002 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5683962264150944, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.5511630000000003 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5643729189789123, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.7405230000000005 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.560272536687631, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.940796 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.5551142005958292, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.1520810000000004 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509433962264151, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.3743700000000003 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5512129380053908, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.607588 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5506003430531733, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.8515750000000004 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.551681706316653, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.1063150000000004 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487421383647799, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.3719430000000004 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5467924528301886, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.648299 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5471698113207547, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.935475 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5489168413696716, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.233696 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505390835579514, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.542693 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487963565387117, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.86295 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509433962264151, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.194204 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5517346317711503, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.536035 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5498231132075472, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.888475 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5514579759862779, \"F1\": 0.0, \"Memory in Mb\": 
0.0006465911865234, \"Time in s\": 6.251513999999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5535516093229744, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 6.625144999999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5522911051212938, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.009503999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5516247379454927, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.4043329999999985 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5525242223355431, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.8097699999999985 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5528798411122146, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 8.225969999999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5529753265602322, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 8.653192999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5523584905660377, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.090958 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5526921306948919, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.539357999999998 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5530098831985625, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.998288 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5508995173321632, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.467735 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5497427101200686, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.947871999999998 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5505241090146751, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.43843 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5518867924528302, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.939415 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5509835407466881, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 
12.450955999999998 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5511006289308176, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 12.972935999999995 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5514054678475163, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 13.505583999999995 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5513207547169812, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 14.048808999999997 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6799116997792495, \"F1\": 0.5482866043613708, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.144107 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7190949227373068, \"F1\": 0.4904904904904904, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.426551 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6986754966887417, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.845137 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7047461368653422, \"F1\": 0.4478844169246646, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.400054 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7024282560706402, \"F1\": 0.4118673647469459, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.091892 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7041942604856513, \"F1\": 0.4165457184325108, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.9195709999999995 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6986754966887417, \"F1\": 0.4048582995951417, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.882304 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.695364238410596, \"F1\": 0.3953997809419496, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.980137999999999 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6873926907039489, \"F1\": 0.4084474355999072, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 6.213938999999999 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6864238410596026, \"F1\": 0.4240827082911007, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.583110999999999 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.687537627934979, 
\"F1\": 0.4433321415802646, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.088205 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6938925680647535, \"F1\": 0.4717460317460317, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 10.727541 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6932416369502462, \"F1\": 0.4715518502267076, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 12.500958999999998 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6944970040996531, \"F1\": 0.4755717959128434, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 14.408948999999998 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6942604856512141, \"F1\": 0.4842993670100534, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 16.456863 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6935016556291391, \"F1\": 0.4860613071139387, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 18.638716 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6929619529931178, \"F1\": 0.480957084842498, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 20.95394 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6904586705911209, \"F1\": 0.4713028906577293, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 23.402754 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6921691646334379, \"F1\": 0.4645852278468223, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 25.9851 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.694205298013245, \"F1\": 0.4685911575716888, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 28.701738 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6967307894460212, \"F1\": 0.467515688445921, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 31.552559 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6958157736303432, \"F1\": 0.4737435986459509, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 34.538332 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6933966791438718, \"F1\": 0.4696604963891426, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 37.65893 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6968359087564385, \"F1\": 0.4670978172999191, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 40.913762 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit 
logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6977041942604857, \"F1\": 0.4643667370726746, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 44.302397 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6952368823229751, \"F1\": 0.4573285962657797, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 47.824759 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6978170223203336, \"F1\": 0.4597281099254495, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 51.480483 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6976505834121728, \"F1\": 0.4612250632200056, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 55.26987 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6983329527289336, \"F1\": 0.4614757439869547, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 59.192657 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6959896983075791, \"F1\": 0.4576304561864129, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 63.254198 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.695649077832372, \"F1\": 0.4559572301425662, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 67.449702 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6952262693156733, \"F1\": 0.4515207945375543, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 71.778902 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6939260151180681, \"F1\": 0.4465678863017841, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 76.241703 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6941630957018569, \"F1\": 0.4433020150091591, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 80.838056 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6917376222011984, \"F1\": 0.4368266405484819, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 85.568011 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6893549178317391, \"F1\": 0.4316805025802109, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 90.431842 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.688353916830738, \"F1\": 0.4290944860374884, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 95.429482 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6863599395840595, \"F1\": 0.4245363461948412, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 100.561249 }, { \"step\": 
35334, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6869304352748061, \"F1\": 0.4212013394725827, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 105.826946 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6911147902869758, \"F1\": 0.4267718148299877, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 111.170297 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6919722177354224, \"F1\": 0.4269831730769231, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 116.582555 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6944181646168401, \"F1\": 0.431171118285882, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 122.063741 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6937727809435803, \"F1\": 0.4308206106870229, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 127.61367 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6930814770218744, \"F1\": 0.4344288818009522, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 133.232984 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6924208977189109, \"F1\": 0.4391771019677997, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 138.92121699999998 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6933966791438718, \"F1\": 0.4472227028897733, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 144.67862699999998 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6956225635244939, \"F1\": 0.4550767290309018, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 150.505407 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6962150478292862, \"F1\": 0.4576097220511558, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 156.401784 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.6963103122043519, \"F1\": 0.4557564992733731, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 162.367508 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Elec2\", \"Accuracy\": 0.697439293598234, \"F1\": 0.4596278189560006, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 168.40291499999998 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.52, \"F1\": 0.3333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.006954 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.56, \"F1\": 0.2142857142857142, 
\"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.018084 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5866666666666667, \"F1\": 0.3404255319148936, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.032476 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6, \"F1\": 0.375, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.049891 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.64, \"F1\": 0.4705882352941176, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.070336 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.62, \"F1\": 0.4466019417475728, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.0938939999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6342857142857142, \"F1\": 0.4181818181818181, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.120402 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.63, \"F1\": 0.4126984126984127, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.149914 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6488888888888888, \"F1\": 0.4316546762589928, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.1823739999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.648, \"F1\": 0.4358974358974359, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.218053 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6618181818181819, \"F1\": 0.4561403508771929, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.257041 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6733333333333333, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.299108 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.683076923076923, \"F1\": 0.4663212435233161, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.34419 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6942857142857143, \"F1\": 0.4780487804878048, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.39236 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7013333333333334, \"F1\": 0.4909090909090909, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.44361 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.705, \"F1\": 0.4913793103448276, \"Memory 
in Mb\": 0.0006465911865234, \"Time in s\": 0.497978 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7105882352941176, \"F1\": 0.4896265560165975, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.555381 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7222222222222222, \"F1\": 0.5098039215686275, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.615936 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7157894736842105, \"F1\": 0.5054945054945055, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.679555 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.718, \"F1\": 0.5252525252525252, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.74642 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7257142857142858, \"F1\": 0.5294117647058824, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.816531 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7218181818181818, \"F1\": 0.5233644859813085, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.889759 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7217391304347827, \"F1\": 0.5209580838323353, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.966149 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7283333333333334, \"F1\": 0.5275362318840581, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.04577 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7376, \"F1\": 0.5340909090909091, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.128489 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7369230769230769, \"F1\": 0.5415549597855228, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.2144389999999998 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7333333333333333, \"F1\": 0.5477386934673367, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3035119999999998 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74, \"F1\": 0.5560975609756097, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.3960879999999998 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.743448275862069, \"F1\": 0.5753424657534246, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.4918939999999998 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.7453333333333333, \"F1\": 0.5820568927789934, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.590912 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7470967741935484, \"F1\": 0.5847457627118644, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.693051 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.74625, \"F1\": 0.5915492957746479, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.7983589999999998 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7490909090909091, \"F1\": 0.602687140115163, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.906819 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7541176470588236, \"F1\": 0.6122448979591837, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.01847 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7554285714285714, \"F1\": 0.6123188405797102, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.13332 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7566666666666667, \"F1\": 0.6123893805309735, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.251249 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.76, \"F1\": 0.6237288135593221, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.372436 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7589473684210526, \"F1\": 0.6288492706645057, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.496743 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7610256410256411, \"F1\": 0.631911532385466, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.624394 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.761, \"F1\": 0.6328725038402457, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.755204 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7609756097560976, \"F1\": 0.635958395245171, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.889431 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7638095238095238, \"F1\": 0.6436781609195402, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.026807 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7665116279069767, \"F1\": 0.651872399445215, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.1673590000000003 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Vowpal 
Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.77, \"F1\": 0.6594885598923284, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.311033 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.768, \"F1\": 0.6597131681877444, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.457802 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7695652173913043, \"F1\": 0.6615581098339719, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.607738 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7702127659574468, \"F1\": 0.6633416458852868, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.760779 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7741666666666667, \"F1\": 0.6691086691086692, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.917058 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7771428571428571, \"F1\": 0.6746126340882003, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.076379 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7736, \"F1\": 0.6697782963827306, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.239016 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.209651 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 0.63219 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 1.262199 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 2.097272 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 3.138863 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 4.386558 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 5.839991 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992774566473988, \"F1\": 0.0, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 7.498149999999999 }, { \"step\": 
17127, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999299351900508, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 9.361126 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993694167104572, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 11.45241 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999426742464052, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 13.765639999999998 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474513925381, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 16.300417999999997 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999514935931121, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 19.05618 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995120486449968, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 22.032826 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995445787353302, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 25.228375 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999573042564372, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 28.637878 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995981577076444, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 32.260672 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996204822794418, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 36.096896 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996404568963132, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 40.148391 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996584340514976, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 44.38058100000001 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996746990966644, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 48.731359000000005 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996894855013616, \"F1\": 
0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 53.205186000000005 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999702986131737, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 57.777449 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997153617095814, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 62.448002 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999726747241198, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 67.217049 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372569626904, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 72.084119 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997080632918784, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 77.049615 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997184896028828, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 82.113263 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997281968579556, \"F1\": 0.1176470588235294, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 87.275593 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995796111403048, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 92.536371 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995931720712626, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 97.895723 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058854440356, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 103.35374 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999585980668482, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 108.910478 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995981577076444, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 114.565649 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996096389159972, \"F1\": 0.1333333333333333, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 120.319466 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic 
regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912886086296, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 126.171806 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996023348624504, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 132.122918 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996127997344912, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 138.18042 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996227279464274, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 144.337343 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321597477666, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 150.592744 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996411314612358, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 156.94675700000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649675950254, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 163.39908200000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996578230211784, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 169.949778 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999665599770697, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 176.598846 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996730308869036, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 183.346496 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996801389111014, \"F1\": 0.125, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 190.192536 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996757639114052, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 197.137274 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996825188299177, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 204.180628 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996889980374704, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 211.32291 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Vowpal Wabbit logistic regression\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.999695218076721, \"F1\": 0.1212121212121212, \"Memory in Mb\": 0.0006465911865234, \"Time in s\": 218.563617 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5333333333333333, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.027532 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5592417061611374, \"F1\": 0.5026737967914437, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.072437 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.555205047318612, \"F1\": 0.5154639175257733, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.134216 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5626477541371159, \"F1\": 0.5066666666666667, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.212262 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5689981096408318, \"F1\": 0.4818181818181818, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.306946 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5716535433070866, \"F1\": 0.4645669291338582, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.418348 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5870445344129555, \"F1\": 0.4555160142348755, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.54645 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5962219598583235, \"F1\": 0.4554140127388535, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.690987 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002098635886673, \"F1\": 0.4454148471615721, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 0.852195 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6090651558073654, \"F1\": 0.4405405405405405, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.029786 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6068669527896996, \"F1\": 0.4260651629072681, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.2235980000000002 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6136900078678206, \"F1\": 0.433679354094579, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.4337380000000002 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143790849673203, \"F1\": 0.419672131147541, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.660063 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6142953472690492, \"F1\": 0.4127310061601643, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 1.902843 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", 
\"Accuracy\": 0.6135934550031467, \"F1\": 0.4061895551257253, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.161936 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6141592920353982, \"F1\": 0.4010989010989011, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.437209 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.614658523042754, \"F1\": 0.4037800687285223, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 2.728596 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6151022548505506, \"F1\": 0.4080645161290322, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.0363329999999995 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6100347739692003, \"F1\": 0.4048521607278241, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.360227 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608305804624823, \"F1\": 0.4071428571428571, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 3.700257 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089887640449438, \"F1\": 0.4089673913043478, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.056792 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6096096096096096, \"F1\": 0.4098573281452659, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.429501999999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6101764464505539, \"F1\": 0.4084682440846824, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 4.818382999999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6114825009830909, \"F1\": 0.4153846153846153, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 5.223696999999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6100415251038127, \"F1\": 0.41273450824332, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 5.645150999999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6076225045372051, \"F1\": 0.4070213933077345, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 6.082884999999998 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085284865431667, \"F1\": 0.4092827004219409, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 6.537074999999998 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6083586113919784, \"F1\": 0.4065372829417773, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.007774999999998 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.60624796615685, \"F1\": 0.4062806673209028, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.494666999999998 }, { \"step\": 3180, \"track\": 
\"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6071091538219566, \"F1\": 0.4077761972498815, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 7.997970999999998 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6063926940639269, \"F1\": 0.4049700874367234, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 8.517511999999998 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6048363314656443, \"F1\": 0.4060283687943262, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 9.053437 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6065198741778668, \"F1\": 0.4053586862575626, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 9.605597 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6086594504579517, \"F1\": 0.4090528080469404, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 10.173998 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085198166621731, \"F1\": 0.4078303425774878, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 10.759293 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6070773263433814, \"F1\": 0.4049225883287018, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 11.360927 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6067329762815609, \"F1\": 0.4027885360185902, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 11.978992 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6088899925502855, \"F1\": 0.405436013590034, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 12.613729 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6106944108395839, \"F1\": 0.4078027235921972, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 13.264737 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.611936777541873, \"F1\": 0.4118698605648909, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 13.931975 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6131185270425776, \"F1\": 0.4128536500174642, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 14.615508 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6137946528869916, \"F1\": 0.413510747185261, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 15.315549 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6122448979591837, \"F1\": 0.4115884115884116, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 16.031772 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6126956894702981, \"F1\": 0.4124918672739102, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 16.764316 }, { 
\"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143845669951772, \"F1\": 0.4130226619853175, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 17.512985 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6153846153846154, \"F1\": 0.4131455399061033, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 18.277781 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6163420999799237, \"F1\": 0.4168446750076289, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 19.058798 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6150973068606251, \"F1\": 0.4141232794733692, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 19.855913 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6146735990756788, \"F1\": 0.4133685136323659, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 20.669466 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6152104170598226, \"F1\": 0.4139120436907157, \"Memory in Mb\": 0.0140247344970703, \"Time in s\": 21.499334 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8187845303867404, \"F1\": 0.8284518828451883, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 0.17926 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8023191606847045, \"F1\": 0.7475317348377998, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 0.525852 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.784688995215311, \"F1\": 0.706177800100452, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 1.0396 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8032017664918576, \"F1\": 0.7356321839080461, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 1.7208420000000002 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7979686465003312, \"F1\": 0.7073872721458268, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 2.5693340000000005 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7937442502299908, \"F1\": 0.6972724817715366, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 3.5852350000000004 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7982967986122063, \"F1\": 0.7065840789171829, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 4.768595 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.790396025941769, \"F1\": 0.6875128574367414, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 6.119432000000001 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7841285416411137, \"F1\": 0.6888260254596887, \"Memory in Mb\": 0.0510377883911132, \"Time 
in s\": 7.637661 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7897118887294403, \"F1\": 0.7086710506193606, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 9.323211 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.793176116407426, \"F1\": 0.7240594457089301, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 11.17581 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7960629196946003, \"F1\": 0.7361656551231703, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 13.198889 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.792137216608644, \"F1\": 0.7295027624309391, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 15.38848 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7820704880548766, \"F1\": 0.7260111022997621, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 17.743988 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7858562072264331, \"F1\": 0.7383564107174968, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 20.265390000000004 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7866850638151086, \"F1\": 0.7435727317963178, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 22.952880000000004 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.785728199467567, \"F1\": 0.738593155893536, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 25.806911000000003 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7806463481940271, \"F1\": 0.7274666666666666, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 28.828021000000003 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7788880497298554, \"F1\": 0.7181158346911569, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 32.016234000000004 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7728903361112645, \"F1\": 0.7138983522213725, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 35.376132000000005 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7701445466491459, \"F1\": 0.7094931242941608, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 38.860646 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7628317696051378, \"F1\": 0.702236220472441, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 42.445598 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7537553390603254, \"F1\": 0.6903626817934946, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 46.130991 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7508163546888654, \"F1\": 0.6836389115964032, 
\"Memory in Mb\": 0.0510377883911132, \"Time in s\": 49.916905 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7509823833281822, \"F1\": 0.6798001589644601, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 53.803318 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7457015495648482, \"F1\": 0.668217569513681, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 57.790137 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7466170638976329, \"F1\": 0.665839982747466, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 61.87940199999999 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7447865336854969, \"F1\": 0.6611180904522613, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 66.06947 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7448711605069843, \"F1\": 0.6581322996888865, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 70.360011 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.741123661650539, \"F1\": 0.650402464473815, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 74.751133 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7390065871461634, \"F1\": 0.6440019426906265, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 79.242684 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7358145631402849, \"F1\": 0.6343280019097637, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 83.83464099999999 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7320466936481921, \"F1\": 0.6243023964732918, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 88.52693 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7297990455475116, \"F1\": 0.6158319870759289, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 93.319472 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7256930209088902, \"F1\": 0.6059617649723658, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 98.21253099999998 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7215391690939752, \"F1\": 0.596427301813011, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 103.20618199999998 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7176695205990274, \"F1\": 0.5867248908296943, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 108.30076099999998 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7142359194818021, \"F1\": 0.5779493779493778, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 113.49672599999998 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.7138369229898395, \"F1\": 0.5724554949469323, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 118.79330799999998 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7174866856149452, \"F1\": 0.5752924583091347, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 124.19030599999998 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7169740207295733, \"F1\": 0.5716148486206756, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 129.68778099999997 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7183516858952459, \"F1\": 0.573859795618116, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 135.28579999999997 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7206407064198989, \"F1\": 0.5799529121154812, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 140.98460499999996 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7217720693374808, \"F1\": 0.5866964784795975, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 146.78445599999995 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7228776766660944, \"F1\": 0.5923065819861432, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 152.68634499999996 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.724127174565087, \"F1\": 0.5973170817134251, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 158.68998499999995 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7260280406754186, \"F1\": 0.6013259517462921, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 164.79474299999995 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7277117299422816, \"F1\": 0.6045222270465248, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 170.99996699999994 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7273894532921857, \"F1\": 0.6015933631814591, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 177.30586899999994 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7287136581381487, \"F1\": 0.6038234630387828, \"Memory in Mb\": 0.0510377883911132, \"Time in s\": 183.71242299999997 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.01864 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7636363636363637, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.044672 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.8048780487804877, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.0769 }, { \"step\": 100, 
\"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.819047619047619, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.1152479999999999 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8217054263565893, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.1596739999999999 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.830188679245283, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.2100959999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8323699421965318, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.266544 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.83248730964467, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.329068 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8240740740740741, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.397573 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.825, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.472283 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8244274809160306, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.553059 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8285714285714285, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.639906 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8299319727891157, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.732828 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8322580645161292, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.831756 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8318042813455658, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 0.937046 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8313953488372093, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.048453 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8291316526610645, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.165941 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8310991957104559, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.289511 }, { \"step\": 475, 
\"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.8291457286432161, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.419108 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577154308617234, \"F1\": 0.8337236533957845, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.554885 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587786259541985, \"F1\": 0.8310502283105022, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.696767 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.8311688311688311, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.844693 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606271777003485, \"F1\": 0.8340248962655602, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 1.998693 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8647746243739566, \"F1\": 0.8363636363636363, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.1587810000000003 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8356435643564357, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.3249540000000004 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 0.8426966292134833, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.4971910000000004 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.870919881305638, \"F1\": 0.8465608465608465, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.675498 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8755364806866953, \"F1\": 0.8502581755593803, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 2.85988 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8562091503267973, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.050473 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798397863818425, \"F1\": 0.8584905660377359, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.247084 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798449612403101, \"F1\": 0.8580152671755725, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.4497720000000003 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798498122653317, \"F1\": 0.8596491228070174, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 3.658789000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798543689320388, \"F1\": 0.860759493670886, \"Memory in Mb\": 0.057229995727539, \"Time in 
s\": 3.8738800000000007 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8602739726027396, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.094979 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832951945080092, \"F1\": 0.8636363636363635, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.322174 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8809788654060067, \"F1\": 0.8608582574772432, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.555426000000001 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8820346320346321, \"F1\": 0.8635794743429286, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 4.7947690000000005 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819810326659642, \"F1\": 0.8650602409638554, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.040106000000001 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8829568788501027, \"F1\": 0.8661971830985915, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.291615 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8808808808808809, \"F1\": 0.8643101482326111, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.549186000000001 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880859375, \"F1\": 0.8647450110864746, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 5.812868000000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882745471877979, \"F1\": 0.8673139158576052, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.082483000000001 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8817504655493482, \"F1\": 0.8672936259143157, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.358049000000001 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8835304822565969, \"F1\": 0.8693877551020409, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.639582000000001 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861209964412812, \"F1\": 0.8735177865612648, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 6.927116000000001 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8859878154917319, \"F1\": 0.8731848983543079, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.220572000000001 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850085178875639, \"F1\": 0.8717948717948718, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.520004000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8865721434528774, \"F1\": 0.8731343283582089, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 7.825445000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.886437908496732, \"F1\": 0.8728270814272644, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 8.137127000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8847077662129704, \"F1\": 0.8714285714285714, \"Memory in Mb\": 0.057229995727539, \"Time in s\": 8.454914 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 0.257042 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 0.764431 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 1.522309 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 2.530537 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 3.789363 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 5.298413 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0107755661010742, \"Time in s\": 7.057599 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372397030808, \"F1\": 0.7777777777777778, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 9.078055 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664369963798, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 11.376218 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997897945241474, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 13.952029 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998089050257978, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 16.806125 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248303043572, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 19.939484 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999838305441022, \"F1\": 0.8181818181818181, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 23.347474 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", 
\"dataset\": \"SMTP\", \"Accuracy\": 0.9998498554859052, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 26.939926 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999859865470852, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 30.696722 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686241665844, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 34.617556 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998763523956724, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 38.704559 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832219075702, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 42.956191 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998893682929528, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 47.37231800000001 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998949000236474, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 51.95229900000001 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998999049096642, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 56.69619700000001 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904454795175, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 61.60400300000001 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999908609029428, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 66.676032 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124170699132, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 71.913656 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999159204607558, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 77.315763 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999191543545486, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 82.88181300000001 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999026858699884, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 88.61249600000001 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999061614398588, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 94.508066 }, { \"step\": 55187, \"track\": \"Binary 
classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998912767730946, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 100.56946700000002 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869221741492, \"F1\": 0.4444444444444444, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 106.795227 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9988473013289938, \"F1\": 0.2916666666666666, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 113.185031 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9986369981115034, \"F1\": 0.2522522522522523, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 119.73505000000002 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9979139463040224, \"F1\": 0.1761006289308176, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 126.446401 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9979443903494536, \"F1\": 0.1739130434782608, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 133.31736600000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9977478830100296, \"F1\": 0.1573033707865168, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 140.34740000000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9967302611411972, \"F1\": 0.125, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 147.535714 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9964777730436016, \"F1\": 0.1142857142857143, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 154.879523 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9964045192427364, \"F1\": 0.1095890410958904, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 162.37251700000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9958230031260106, \"F1\": 0.0935672514619883, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 170.01463900000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9956515456062218, \"F1\": 0.0881542699724517, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 177.804854 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9951936633257288, \"F1\": 0.0786240786240786, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 185.743198 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9946700031279324, \"F1\": 0.0698689956331877, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 193.829526 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9945862052109302, \"F1\": 0.0673684210526315, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 202.057449 }, { 
\"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9945539883675102, \"F1\": 0.0655737704918032, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 210.444774 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9939860335847912, \"F1\": 0.0585009140767824, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 218.97044600000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9938540274398254, \"F1\": 0.0561403508771929, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 227.63481900000005 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9938618067978532, \"F1\": 0.0550774526678141, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 236.43819800000009 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9939677917300724, \"F1\": 0.0548885077186964, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 245.38462600000005 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.993543958990198, \"F1\": 0.0504731861198738, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 254.47095700000008 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Naive Bayes\", \"dataset\": \"SMTP\", \"Accuracy\": 0.993483904192372, \"F1\": 0.0490797546012269, \"Memory in Mb\": 0.0201406478881835, \"Time in s\": 263.6963870000001 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4952380952380952, \"F1\": 0.208955223880597, \"Memory in Mb\": 0.0192947387695312, \"Time in s\": 0.019877 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5213270142180095, \"F1\": 0.3129251700680272, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.0520889999999999 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5299684542586751, \"F1\": 0.4063745019920318, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.095483 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5437352245862884, \"F1\": 0.4238805970149253, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.150343 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.553875236294896, \"F1\": 0.4099999999999999, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.216684 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5590551181102362, \"F1\": 0.4017094017094017, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.295132 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5762483130904184, \"F1\": 0.3984674329501916, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.3850179999999999 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5867768595041323, \"F1\": 
0.4047619047619047, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.486959 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5918153200419727, \"F1\": 0.3987635239567234, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.601178 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6015108593012276, \"F1\": 0.3971428571428571, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.727681 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6, \"F1\": 0.3852242744063324, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 0.865767 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6073957513768686, \"F1\": 0.3966142684401451, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.0159 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085693536673928, \"F1\": 0.384, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.177562 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089008766014835, \"F1\": 0.3790149892933619, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.351249 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6085588420390182, \"F1\": 0.3742454728370221, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.5365639999999998 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6094395280235988, \"F1\": 0.370722433460076, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.73385 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6102165463631316, \"F1\": 0.3754448398576512, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 1.943126 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.610907184058731, \"F1\": 0.3816666666666667, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.16423 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6060606060606061, \"F1\": 0.3799843627834245, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.39723 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6045304388862671, \"F1\": 0.3838235294117647, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.641805 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6053932584269663, \"F1\": 0.3868715083798882, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 2.898512 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6061776061776062, \"F1\": 0.388814913448735, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.166739 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.606893721789085, 
\"F1\": 0.388250319284802, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.447054 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608336610302792, \"F1\": 0.3963636363636363, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 3.73904 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6070215175537939, \"F1\": 0.3944153577661431, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.043011 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6047186932849364, \"F1\": 0.3892316320807628, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.35852 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6057322614470465, \"F1\": 0.3922413793103448, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 4.686463999999999 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6056622851365016, \"F1\": 0.3899895724713243, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.02596 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6036446469248291, \"F1\": 0.3903903903903904, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.377489 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6045926391947153, \"F1\": 0.3924601256645723, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 5.740578999999999 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6039573820395738, \"F1\": 0.3900609470229723, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 6.115373999999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6024771453848422, \"F1\": 0.391696750902527, \"Memory in Mb\": 0.019317626953125, \"Time in s\": 6.501596999999999 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030883614526737, \"F1\": 0.3933566433566433, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 6.903521 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6069941715237303, \"F1\": 0.4035383319292334, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 7.317575 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6079805877595039, \"F1\": 0.4079804560260586, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 7.744409999999999 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6107470511140236, \"F1\": 0.4146629877808436, \"Memory in Mb\": 0.0351476669311523, \"Time in s\": 8.183644999999999 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6123437898495282, \"F1\": 0.4180704441041348, \"Memory in Mb\": 0.0446500778198242, \"Time in s\": 8.636627999999998 }, { \"step\": 4028, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6143531164638689, \"F1\": 0.4246017043349389, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 9.102372 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.617227195741592, \"F1\": 0.4321608040201005, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 9.580540999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6218447747110167, \"F1\": 0.4439819632327437, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 10.071727 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6239355581127733, \"F1\": 0.4513096037609133, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 10.575599 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6259267580319029, \"F1\": 0.4567699836867862, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 11.092444999999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6276058810621022, \"F1\": 0.4638230647709321, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 11.621769 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6283508470941453, \"F1\": 0.4695439240893787, \"Memory in Mb\": 0.0508146286010742, \"Time in s\": 12.163799 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6288530090165653, \"F1\": 0.471641791044776, \"Memory in Mb\": 0.0602216720581054, \"Time in s\": 12.720221 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6311794871794871, \"F1\": 0.475801749271137, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 13.289238 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6336077092953222, \"F1\": 0.484026010743568, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 13.87141 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6361313151169649, \"F1\": 0.4905037159372419, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 14.466033 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6383593298671288, \"F1\": 0.495703544575725, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 15.073179 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6421966408756369, \"F1\": 0.5034049240440022, \"Memory in Mb\": 0.0602674484252929, \"Time in s\": 15.693544 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530386740331491, \"F1\": 0.8513966480446927, \"Memory in Mb\": 0.178506851196289, \"Time in s\": 0.136823 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8663721700717836, \"F1\": 0.8393094289508632, \"Memory in Mb\": 0.2121829986572265, 
\"Time in s\": 0.372341 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8369525211630475, \"F1\": 0.810926163038839, \"Memory in Mb\": 0.2367534637451172, \"Time in s\": 0.715196 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8459839911675407, \"F1\": 0.8219527760051053, \"Memory in Mb\": 0.2367534637451172, \"Time in s\": 1.157087 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8511812762199161, \"F1\": 0.8165487207403377, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 1.692418 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404783808647655, \"F1\": 0.8027303754266211, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 2.325246 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8334647531935025, \"F1\": 0.7973128598848368, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 3.058544 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8330343590451221, \"F1\": 0.7918100481761873, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 3.886934 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8344167790997179, \"F1\": 0.8018203170874927, \"Memory in Mb\": 0.2367000579833984, \"Time in s\": 4.816345 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8403797328623468, \"F1\": 0.8133711925658234, \"Memory in Mb\": 0.3037357330322265, \"Time in s\": 5.843331 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8398394380331159, \"F1\": 0.8174748398902103, \"Memory in Mb\": 0.3038501739501953, \"Time in s\": 6.967896 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.840493054916751, \"F1\": 0.8203108808290155, \"Memory in Mb\": 0.3038501739501953, \"Time in s\": 8.190139 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404517279442982, \"F1\": 0.8186818488854579, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 9.510034 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8397066939998423, \"F1\": 0.8187572434697333, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 10.932755 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422253293104717, \"F1\": 0.8231023102310231, \"Memory in Mb\": 0.3877925872802734, \"Time in s\": 12.458737 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8440841669541221, \"F1\": 0.8261270964763809, \"Memory in Mb\": 0.3890361785888672, \"Time in s\": 14.085848 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8445555483410169, \"F1\": 0.8248207229620957, \"Memory in 
Mb\": 0.3890628814697265, \"Time in s\": 15.806373999999998 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8382289814190225, \"F1\": 0.8146430578976953, \"Memory in Mb\": 0.4148235321044922, \"Time in s\": 17.623171 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8344855632370882, \"F1\": 0.8052764677739047, \"Memory in Mb\": 0.4148235321044922, \"Time in s\": 19.541046 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8333793255698438, \"F1\": 0.8031044153133764, \"Memory in Mb\": 0.4155101776123047, \"Time in s\": 21.569665 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8341655716162943, \"F1\": 0.8009086893418313, \"Memory in Mb\": 0.4168033599853515, \"Time in s\": 23.708109 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8308163162912047, \"F1\": 0.7980112615310889, \"Memory in Mb\": 0.5072460174560547, \"Time in s\": 25.964372999999995 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8293900273551855, \"F1\": 0.7961699443839229, \"Memory in Mb\": 0.5655117034912109, \"Time in s\": 28.344831 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298302902083429, \"F1\": 0.7941012799109627, \"Memory in Mb\": 0.6239414215087891, \"Time in s\": 30.844750999999995 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8288224645679722, \"F1\": 0.7904437597967678, \"Memory in Mb\": 0.6005496978759766, \"Time in s\": 33.457669 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8244109530885162, \"F1\": 0.7830920914621354, \"Memory in Mb\": 0.6264286041259766, \"Time in s\": 36.193139 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8225747107640734, \"F1\": 0.7806973218797373, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 39.050648 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8180707218039185, \"F1\": 0.7764375333042677, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 42.027952 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8183306055646481, \"F1\": 0.7761572011443043, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 45.127396 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8178005077449502, \"F1\": 0.7755416553349651, \"Memory in Mb\": 0.6265659332275391, \"Time in s\": 48.341337 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8154174826419797, \"F1\": 0.771830985915493, \"Memory in Mb\": 0.6266345977783203, \"Time in s\": 51.670155 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.81342485598979, \"F1\": 0.7672447179310641, \"Memory in Mb\": 0.6266345977783203, \"Time in s\": 55.116457 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8088771448640332, \"F1\": 0.763747622591582, \"Memory in Mb\": 0.6848773956298828, \"Time in s\": 58.686244 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8062526377300913, \"F1\": 0.7606673083092718, \"Memory in Mb\": 0.6848773956298828, \"Time in s\": 62.38209 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8052603361821565, \"F1\": 0.759212322090076, \"Memory in Mb\": 0.7534084320068359, \"Time in s\": 66.209893 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8025755020695998, \"F1\": 0.7564951026736755, \"Memory in Mb\": 0.7792224884033203, \"Time in s\": 70.16157899999999 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8007517675487008, \"F1\": 0.7563742476746306, \"Memory in Mb\": 0.8375339508056641, \"Time in s\": 74.24016799999998 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7980945188369594, \"F1\": 0.7537464130088213, \"Memory in Mb\": 0.8621463775634766, \"Time in s\": 78.43750399999999 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.797186765912886, \"F1\": 0.7523842432619212, \"Memory in Mb\": 0.9281406402587892, \"Time in s\": 82.758889 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7972350230414746, \"F1\": 0.7516392888528357, \"Memory in Mb\": 0.9552211761474608, \"Time in s\": 87.200985 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7969847893390766, \"F1\": 0.7511960143851661, \"Memory in Mb\": 1.0704402923583984, \"Time in s\": 91.761102 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7938293343144727, \"F1\": 0.748388338304628, \"Memory in Mb\": 1.0715465545654297, \"Time in s\": 96.434966 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7918730908437508, \"F1\": 0.7483706784184718, \"Memory in Mb\": 1.0715465545654297, \"Time in s\": 101.219833 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7913854953214761, \"F1\": 0.7506596306068601, \"Memory in Mb\": 1.1051769256591797, \"Time in s\": 106.118884 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7935686428413746, \"F1\": 0.7554199360650974, \"Memory in Mb\": 1.1051769256591797, \"Time in s\": 111.1286 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7953209358128375, \"F1\": 0.7590667721161451, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 116.244938 }, { \"step\": 42582, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7966933608886593, \"F1\": 0.7607572198424761, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 121.471609 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7973647296893325, \"F1\": 0.7615800865800865, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 126.806064 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7960714527065078, \"F1\": 0.7581933278132429, \"Memory in Mb\": 1.129629135131836, \"Time in s\": 132.253655 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.7969933111106206, \"F1\": 0.7591535278403435, \"Memory in Mb\": 1.1878719329833984, \"Time in s\": 137.81786 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.6428571428571429, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.016417 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7346938775510203, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.037579 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.062555 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.8080808080808081, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.093468 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8130081300813008, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.128067 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8235294117647058, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.166217 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8263473053892215, \"Memory in Mb\": 0.0693511962890625, \"Time in s\": 0.207895 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.8272251308900525, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.254237 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8190476190476189, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.304152 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.8205128205128206, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.3576809999999999 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8203125000000001, \"Memory in Mb\": 0.0693740844726562, \"Time in 
s\": 0.4147329999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8248175182481753, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.4754899999999999 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8263888888888888, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.539997 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.608045 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8286604361370716, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.679831 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8284023668639053, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.75623 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8262108262108262, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.836328 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8283378746594006, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 0.920157 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.826530612244898, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.007627 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577154308617234, \"F1\": 0.8313539192399049, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.098858 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587786259541985, \"F1\": 0.8287037037037036, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.193786 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.292398 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606271777003485, \"F1\": 0.8319327731092437, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.395047 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8647746243739566, \"F1\": 0.834355828220859, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.502529 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8336673346693387, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.61388 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 
0.8409090909090909, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.728993 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.870919881305638, \"F1\": 0.8449197860962566, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.847898 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8755364806866953, \"F1\": 0.8486956521739131, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 1.970613 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8547854785478548, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.097195 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798397863818425, \"F1\": 0.8571428571428571, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.227565 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798449612403101, \"F1\": 0.8567026194144837, \"Memory in Mb\": 0.0693740844726562, \"Time in s\": 2.361715 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798498122653317, \"F1\": 0.8584070796460177, \"Memory in Mb\": 0.0058956146240234, \"Time in s\": 2.500804 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786407766990292, \"F1\": 0.8575498575498576, \"Memory in Mb\": 0.1346960067749023, \"Time in s\": 2.646368 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8579387186629527, \"Memory in Mb\": 0.1347188949584961, \"Time in s\": 2.795782 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8810068649885584, \"F1\": 0.8583106267029972, \"Memory in Mb\": 0.1347188949584961, \"Time in s\": 2.94896 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882091212458287, \"F1\": 0.8590425531914893, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.105941 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8831168831168831, \"F1\": 0.8611825192802056, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.266898 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880927291886196, \"F1\": 0.8599752168525404, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.431805 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819301848049281, \"F1\": 0.8609431680773881, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.600575 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828828828828829, \"F1\": 0.8621908127208481, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.773271 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": 
\"Phishing\", \"Accuracy\": 0.8818359375, \"F1\": 0.8613974799541809, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 3.949737 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8836987607244995, \"F1\": 0.8641425389755011, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.130098 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845437616387337, \"F1\": 0.8658008658008659, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.314272 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844404003639672, \"F1\": 0.8656084656084656, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.502263 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816725978647687, \"F1\": 0.8630278063851698, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.694139 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8807658833768495, \"F1\": 0.8614762386248735, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 4.889705 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.879045996592845, \"F1\": 0.8594059405940594, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.0886830000000005 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8807339449541285, \"F1\": 0.8610301263362489, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.292268000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880718954248366, \"F1\": 0.8609523809523809, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.499479000000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8799039231385108, \"F1\": 0.8605947955390334, \"Memory in Mb\": 0.1347417831420898, \"Time in s\": 5.711295000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 0.17714 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 0.53242 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 1.05881 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 1.755561 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 2.619724 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, 
\"Time in s\": 3.65225 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0172128677368164, \"Time in s\": 4.852976 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995401694803916, \"F1\": 0.5882352941176471, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 6.232766 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992409202382344, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 7.809704 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993168322034788, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 9.583916 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999378941333843, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 11.564329 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994306984891612, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 13.76196 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994744926833212, \"F1\": 0.48, \"Memory in Mb\": 0.0401153564453125, \"Time in s\": 16.175204 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999512030329192, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 18.806955 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999544562780269, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 21.653103 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995730285413998, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 24.713929 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999598145285935, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 27.986042 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999620471199603, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 31.466943 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996404469520964, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0401382446289062, \"Time in s\": 35.15479 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996584250768544, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 39.051373 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996746909564086, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 43.155234 }, { \"step\": 41866, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996894780843186, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 47.461241 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997029793456408, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 51.972515 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997153554772176, \"F1\": 0.5185185185185186, \"Memory in Mb\": 0.0321388244628906, \"Time in s\": 56.689378000000005 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847017278344, \"F1\": 0.4827586206896552, \"Memory in Mb\": 0.0443153381347656, \"Time in s\": 61.613517 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996766174181944, \"F1\": 0.4666666666666667, \"Memory in Mb\": 0.0443153381347656, \"Time in s\": 66.740207 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996691319579604, \"F1\": 0.5142857142857142, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 72.066529 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996809488955202, \"F1\": 0.5142857142857142, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 77.596723 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999673830319284, \"F1\": 0.5, \"Memory in Mb\": 0.0535621643066406, \"Time in s\": 83.324048 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995270542486292, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 89.177762 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995423108218064, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 95.152342 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995566138435013, \"F1\": 0.4489795918367347, \"Memory in Mb\": 0.0805435180664062, \"Time in s\": 101.246537 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995222777795472, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 107.458379 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995363286502528, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 113.786816 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549576602006, \"F1\": 0.4230769230769231, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 120.231179 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562088545696, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 126.79213 }, { \"step\": 
70411, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995739241585002, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 133.468631 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995851368357004, \"F1\": 0.4642857142857143, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 140.25926299999998 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995823003126012, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 147.163887 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995927429419724, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 154.18114 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996026761682604, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 161.310049 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996121363778544, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 168.551426 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996211565723224, \"F1\": 0.456140350877193, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 175.901071 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996178237450885, \"F1\": 0.4482758620689655, \"Memory in Mb\": 0.0897903442382812, \"Time in s\": 183.360737 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996146390452392, \"F1\": 0.4406779661016949, \"Memory in Mb\": 0.0940589904785156, \"Time in s\": 190.930796 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996230165530005, \"F1\": 0.4406779661016949, \"Memory in Mb\": 0.0940589904785156, \"Time in s\": 198.60706 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996198568872988, \"F1\": 0.4333333333333333, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 206.389291 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999616828875776, \"F1\": 0.4262295081967213, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 214.276883 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996139244578855, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 222.269881 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996216460498796, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1032600402832031, \"Time in s\": 230.368202 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5714285714285714, \"F1\": 0.628099173553719, \"Memory in Mb\": 
0.0257539749145507, \"Time in s\": 0.030279 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5592417061611374, \"F1\": 0.5903083700440529, \"Memory in Mb\": 0.0258378982543945, \"Time in s\": 0.079572 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5615141955835962, \"F1\": 0.5947521865889213, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.147698 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5555555555555556, \"F1\": 0.5822222222222222, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.234734 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.555765595463138, \"F1\": 0.5506692160611854, \"Memory in Mb\": 0.0258989334106445, \"Time in s\": 0.340374 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.5291181364392679, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.465898 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5708502024291497, \"F1\": 0.5167173252279634, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.6107940000000001 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5761511216056671, \"F1\": 0.510231923601637, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.774445 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.505, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 0.95753 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4953271028037382, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.15978 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.478688524590164, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.380797 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4807302231237322, \"Memory in Mb\": 0.0259599685668945, \"Time in s\": 1.620256 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.4661508704061895, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 1.881163 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.458029197080292, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.166507 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.4517241379310345, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.464497 }, { \"step\": 1696, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, \"F1\": 0.4459016393442623, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 2.7711840000000003 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6018878400888396, \"F1\": 0.445475638051044, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.0861490000000003 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.4470416362308254, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.4094810000000004 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.443526170798898, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 3.741125 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5960358659745163, \"F1\": 0.4427083333333333, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.081019 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5968539325842697, \"F1\": 0.4425108763206961, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.429484 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5975975975975976, \"F1\": 0.4423305588585017, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 4.786285 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5982765695527288, \"F1\": 0.4396107613050944, \"Memory in Mb\": 0.0260210037231445, \"Time in s\": 5.150978 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5973259929217459, \"F1\": 0.4398249452954048, \"Memory in Mb\": 0.0303611755371093, \"Time in s\": 5.5260560000000005 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5956964892412231, \"F1\": 0.4436363636363636, \"Memory in Mb\": 0.0572662353515625, \"Time in s\": 5.913093000000001 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4497512437810945, \"Memory in Mb\": 0.0575103759765625, \"Time in s\": 6.312855000000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.600139811254806, \"F1\": 0.4536771728748806, \"Memory in Mb\": 0.0577545166015625, \"Time in s\": 6.725712000000001 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5979103471520054, \"F1\": 0.4525011473152822, \"Memory in Mb\": 0.057861328125, \"Time in s\": 7.151638 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971363488447771, \"F1\": 0.4497777777777778, \"Memory in Mb\": 0.0579833984375, \"Time in s\": 7.588719 }, { \"step\": 3180, \"track\": \"Binary 
classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008178672538534, \"F1\": 0.4499349804941482, \"Memory in Mb\": 0.05804443359375, \"Time in s\": 8.037238 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6024353120243531, \"F1\": 0.4470787468247248, \"Memory in Mb\": 0.05816650390625, \"Time in s\": 8.49667 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6012975523444412, \"F1\": 0.444991789819376, \"Memory in Mb\": 0.0582275390625, \"Time in s\": 8.967725 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.603946239633972, \"F1\": 0.4431041415359871, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 9.449361 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.607826810990841, \"F1\": 0.4452296819787986, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 9.942275 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6071717444055001, \"F1\": 0.441976254308694, \"Memory in Mb\": 0.05828857421875, \"Time in s\": 10.446028 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6062909567496724, \"F1\": 0.4378742514970059, \"Memory in Mb\": 0.058349609375, \"Time in s\": 10.961132 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.606988013261923, \"F1\": 0.4353242946134115, \"Memory in Mb\": 0.05841064453125, \"Time in s\": 11.487205 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6088899925502855, \"F1\": 0.4360902255639098, \"Memory in Mb\": 0.05841064453125, \"Time in s\": 12.024686 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6082748608758771, \"F1\": 0.4341139461726669, \"Memory in Mb\": 0.0584716796875, \"Time in s\": 12.573182999999998 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6105213493748526, \"F1\": 0.4370951244459598, \"Memory in Mb\": 0.0584716796875, \"Time in s\": 13.132692999999998 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6119677790563867, \"F1\": 0.4372496662216288, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 13.703478 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.614243990114581, \"F1\": 0.4387054593004249, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 14.285281 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6126837831906956, \"F1\": 0.4355612408058842, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 14.878356 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.613339052112374, \"F1\": 0.4360337816703159, \"Memory in Mb\": 0.05853271484375, \"Time in s\": 15.482258 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6148039421262319, \"F1\": 0.4352905010759299, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 16.097208 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157948717948718, \"F1\": 0.4332829046898639, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 16.723433999999997 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6167436257779563, \"F1\": 0.434705359786793, \"Memory in Mb\": 0.0646743774414062, \"Time in s\": 17.360799999999998 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6158836249262827, \"F1\": 0.4313154831199068, \"Memory in Mb\": 0.0647354125976562, \"Time in s\": 18.009093999999997 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6160215674947044, \"F1\": 0.4296338672768878, \"Memory in Mb\": 0.0647964477539062, \"Time in s\": 18.668471 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6165314210228345, \"F1\": 0.4282498593134496, \"Memory in Mb\": 0.0624046325683593, \"Time in s\": 19.339784 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8386740331491712, \"F1\": 0.8370535714285713, \"Memory in Mb\": 0.1590566635131836, \"Time in s\": 0.369424 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8823854224185533, \"F1\": 0.857334226389819, \"Memory in Mb\": 0.2951574325561523, \"Time in s\": 1.082516 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8715495031284505, \"F1\": 0.8438478747203579, \"Memory in Mb\": 0.1301527023315429, \"Time in s\": 2.245646 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8755175269113994, \"F1\": 0.8480970023576963, \"Memory in Mb\": 0.2537450790405273, \"Time in s\": 3.683735 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.873923603444469, \"F1\": 0.8402797202797203, \"Memory in Mb\": 0.3769788742065429, \"Time in s\": 5.436553 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8680772769089237, \"F1\": 0.8326721120186699, \"Memory in Mb\": 0.4361524581909179, \"Time in s\": 7.563805 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8667402617883615, \"F1\": 0.8319076984284862, \"Memory in Mb\": 0.2912740707397461, \"Time in s\": 10.057711 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8665654753691182, \"F1\": 
0.8309144955411786, \"Memory in Mb\": 0.3158788681030273, \"Time in s\": 12.896512 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8588249724027965, \"F1\": 0.8328249818445898, \"Memory in Mb\": 0.3159399032592773, \"Time in s\": 16.103426000000002 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8594767634396733, \"F1\": 0.8384722750919934, \"Memory in Mb\": 0.3158178329467773, \"Time in s\": 19.644725 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8557952834922228, \"F1\": 0.8381938970836617, \"Memory in Mb\": 0.3156957626342773, \"Time in s\": 23.527363 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8595345414405299, \"F1\": 0.8445801526717558, \"Memory in Mb\": 0.3772764205932617, \"Time in s\": 27.740459 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8540375307803345, \"F1\": 0.8368605865046976, \"Memory in Mb\": 0.4951925277709961, \"Time in s\": 32.366043000000005 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8543719940077269, \"F1\": 0.8371970030850595, \"Memory in Mb\": 0.1922826766967773, \"Time in s\": 37.423083000000005 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8561336375009199, \"F1\": 0.8404472374112462, \"Memory in Mb\": 0.1966886520385742, \"Time in s\": 42.800608 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8560193170058641, \"F1\": 0.8406018483158941, \"Memory in Mb\": 0.1969938278198242, \"Time in s\": 48.503182 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8546198298811766, \"F1\": 0.8374119526541282, \"Memory in Mb\": 0.1967496871948242, \"Time in s\": 54.449200000000005 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8514748267615134, \"F1\": 0.8324339283243393, \"Memory in Mb\": 0.1668386459350586, \"Time in s\": 60.610803 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8503456689711265, \"F1\": 0.8286094477711246, \"Memory in Mb\": 0.1718664169311523, \"Time in s\": 66.962846 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8505436282355539, \"F1\": 0.8284555935639174, \"Memory in Mb\": 0.2014341354370117, \"Time in s\": 73.50726399999999 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8529829172141918, \"F1\": 0.8293992070753278, \"Memory in Mb\": 0.2609548568725586, \"Time in s\": 80.25162699999998 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8476744769454618, \"F1\": 0.8245289561900357, 
\"Memory in Mb\": 0.3200864791870117, \"Time in s\": 87.27384199999999 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8440274511685943, \"F1\": 0.8206203775251132, \"Memory in Mb\": 0.3286733627319336, \"Time in s\": 94.62519899999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8459734167318217, \"F1\": 0.8206693440428381, \"Memory in Mb\": 0.3269262313842773, \"Time in s\": 102.22022699999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8452470307739856, \"F1\": 0.8179693586081537, \"Memory in Mb\": 0.4455976486206054, \"Time in s\": 110.06165699999998 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8426236467841223, \"F1\": 0.813952321204517, \"Memory in Mb\": 0.3226041793823242, \"Time in s\": 118.13610299999998 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.83966313723887, \"F1\": 0.8094081057439986, \"Memory in Mb\": 0.3223371505737304, \"Time in s\": 126.42298599999998 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8335632908897387, \"F1\": 0.804101707498144, \"Memory in Mb\": 0.3226423263549804, \"Time in s\": 134.92385099999998 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.833859856126061, \"F1\": 0.8041634887164072, \"Memory in Mb\": 0.3227415084838867, \"Time in s\": 143.62308599999997 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8353508223260606, \"F1\": 0.8067872717067484, \"Memory in Mb\": 0.3344221115112304, \"Time in s\": 152.52188499999997 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.832366031689514, \"F1\": 0.8022679546409073, \"Memory in Mb\": 0.4427366256713867, \"Time in s\": 161.69157399999995 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8298092511469076, \"F1\": 0.7967539957159334, \"Memory in Mb\": 0.4431562423706054, \"Time in s\": 171.13838399999997 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.828912599926414, \"F1\": 0.7954572719638501, \"Memory in Mb\": 0.4460401535034179, \"Time in s\": 180.848187 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8286855176443852, \"F1\": 0.7940682926829268, \"Memory in Mb\": 0.4524259567260742, \"Time in s\": 190.785177 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8270522564571573, \"F1\": 0.7921309984080055, \"Memory in Mb\": 0.3178968429565429, \"Time in s\": 200.976285 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8257550206959988, 
\"F1\": 0.7908123826701513, \"Memory in Mb\": 0.5092306137084961, \"Time in s\": 211.41806699999995 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8259001819754781, \"F1\": 0.7917201998572448, \"Memory in Mb\": 0.4504041671752929, \"Time in s\": 222.10332899999997 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8258634211520028, \"F1\": 0.7914564998086757, \"Memory in Mb\": 0.3306646347045898, \"Time in s\": 233.03381899999997 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8252625024764385, \"F1\": 0.7899285471248724, \"Memory in Mb\": 0.4912481307983398, \"Time in s\": 244.20613599999996 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8261541433262507, \"F1\": 0.7894103489771359, \"Memory in Mb\": 0.5119619369506836, \"Time in s\": 255.69628199999997 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8248754879526181, \"F1\": 0.7869936802121876, \"Memory in Mb\": 0.4187917709350586, \"Time in s\": 267.45490699999993 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8248403458516202, \"F1\": 0.7864398090294465, \"Memory in Mb\": 0.4483175277709961, \"Time in s\": 279.43877899999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8243447904099391, \"F1\": 0.7868689070919114, \"Memory in Mb\": 0.5077886581420898, \"Time in s\": 291.70049499999993 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8249504553094348, \"F1\": 0.7887758808572466, \"Memory in Mb\": 0.1471853256225586, \"Time in s\": 304.138659 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8262159974490422, \"F1\": 0.7919908399635948, \"Memory in Mb\": 0.2297697067260742, \"Time in s\": 316.729427 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8275224955008998, \"F1\": 0.794816168074903, \"Memory in Mb\": 0.2342596054077148, \"Time in s\": 329.475528 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8281393109602875, \"F1\": 0.7956550876801073, \"Memory in Mb\": 0.3305959701538086, \"Time in s\": 342.401664 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8284774760273185, \"F1\": 0.7958619557185473, \"Memory in Mb\": 0.3348875045776367, \"Time in s\": 355.508994 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8276530083571735, \"F1\": 0.7938902508014333, \"Memory in Mb\": 0.2839117050170898, \"Time in s\": 368.88314 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.8286717146073864, \"F1\": 0.795391632174211, \"Memory in Mb\": 0.3993253707885742, \"Time in s\": 382.503727 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.6428571428571429, \"Memory in Mb\": 0.075688362121582, \"Time in s\": 0.0191 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7346938775510203, \"Memory in Mb\": 0.075749397277832, \"Time in s\": 0.048719 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7894736842105262, \"Memory in Mb\": 0.075749397277832, \"Time in s\": 0.086072 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.8080808080808081, \"Memory in Mb\": 0.075810432434082, \"Time in s\": 0.130387 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8225806451612904, \"F1\": 0.819672131147541, \"Memory in Mb\": 0.075810432434082, \"Time in s\": 0.181491 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.825503355704698, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.0758333206176757, \"Time in s\": 0.241937 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8333333333333334, \"F1\": 0.8242424242424242, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.309852 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8291457286432161, \"F1\": 0.8191489361702128, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.385335 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8155339805825242, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.468101 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8313253012048193, \"F1\": 0.817391304347826, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.558214 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8174603174603176, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.656226 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8361204013377926, \"F1\": 0.8178438661710038, \"Memory in Mb\": 0.0758943557739257, \"Time in s\": 0.765253 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8425925925925926, \"F1\": 0.8197879858657244, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 0.883707 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8481375358166189, \"F1\": 0.822742474916388, \"Memory in Mb\": 
0.0759553909301757, \"Time in s\": 1.011557 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8227848101265823, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.148821 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8521303258145363, \"F1\": 0.8228228228228228, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.295386 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8537735849056604, \"F1\": 0.8208092485549133, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.4534280000000002 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8574610244988864, \"F1\": 0.8232044198895027, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.6208710000000002 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8565400843881856, \"F1\": 0.8238341968911918, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.7976930000000002 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8557114228456913, \"F1\": 0.8260869565217391, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 1.9839770000000003 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8568702290076335, \"F1\": 0.823529411764706, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 2.179609 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561020036429873, \"F1\": 0.8240534521158129, \"Memory in Mb\": 0.0759553909301757, \"Time in s\": 2.3846100000000003 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8554006968641115, \"F1\": 0.8230277185501066, \"Memory in Mb\": 0.1161155700683593, \"Time in s\": 2.6084530000000004 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8547579298831386, \"F1\": 0.8176100628930818, \"Memory in Mb\": 0.1440086364746093, \"Time in s\": 2.84161 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8573717948717948, \"F1\": 0.8172484599589321, \"Memory in Mb\": 0.1442451477050781, \"Time in s\": 3.082865 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8597842835130971, \"F1\": 0.8233009708737864, \"Memory in Mb\": 0.1444129943847656, \"Time in s\": 3.332264 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590504451038575, \"F1\": 0.8263254113345521, \"Memory in Mb\": 0.1444740295410156, \"Time in s\": 3.58978 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640915593705293, \"F1\": 0.8306595365418894, \"Memory in Mb\": 
0.1445350646972656, \"Time in s\": 3.855481 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646408839779005, \"F1\": 0.8344594594594595, \"Memory in Mb\": 0.1445960998535156, \"Time in s\": 4.1295 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8664886515353805, \"F1\": 0.8371335504885993, \"Memory in Mb\": 0.1446571350097656, \"Time in s\": 4.411739 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8643410852713178, \"F1\": 0.8330683624801273, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 4.702108 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8635794743429287, \"F1\": 0.8340943683409437, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 5.000653 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8628640776699029, \"F1\": 0.8345534407027819, \"Memory in Mb\": 0.1446800231933593, \"Time in s\": 5.309277 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8645465253239105, \"F1\": 0.8364153627311521, \"Memory in Mb\": 0.1447410583496093, \"Time in s\": 5.626329 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8672768878718535, \"F1\": 0.838888888888889, \"Memory in Mb\": 0.1447410583496093, \"Time in s\": 5.951525 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8665183537263627, \"F1\": 0.8378378378378378, \"Memory in Mb\": 0.1448020935058593, \"Time in s\": 6.284981 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8668831168831169, \"F1\": 0.8400520156046815, \"Memory in Mb\": 0.1448020935058593, \"Time in s\": 6.628637 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8661749209694415, \"F1\": 0.8410513141426783, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 6.980485000000001 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86652977412731, \"F1\": 0.8414634146341464, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 7.342308000000001 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8638638638638638, \"F1\": 0.8392434988179669, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 7.713462000000001 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8623046875, \"F1\": 0.8377445339470656, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 8.093853000000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8636796949475691, \"F1\": 0.8402234636871508, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 
8.485573 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8649906890130353, \"F1\": 0.8429035752979415, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 8.889089 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671519563239308, \"F1\": 0.8456659619450316, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 9.305059 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8701067615658363, \"F1\": 0.8507157464212679, \"Memory in Mb\": 0.1448631286621093, \"Time in s\": 9.730762 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8720626631853786, \"F1\": 0.852852852852853, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 10.16685 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8713798977853492, \"F1\": 0.8521057786483839, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 10.613004 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.872393661384487, \"F1\": 0.8530259365994236, \"Memory in Mb\": 0.1449241638183593, \"Time in s\": 11.069492 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8733660130718954, \"F1\": 0.8541862652869238, \"Memory in Mb\": 0.1449851989746093, \"Time in s\": 11.536355 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8742994395516414, \"F1\": 0.8560953253895509, \"Memory in Mb\": 0.1449851989746093, \"Time in s\": 12.014925 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0239162445068359, \"Time in s\": 0.25062 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0239772796630859, \"Time in s\": 0.745512 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 1.4971299999999998 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 2.521262 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240383148193359, \"Time in s\": 3.817259 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240993499755859, \"Time in s\": 5.360746 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0240993499755859, \"Time in s\": 7.021896 }, { \"step\": 
15224, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.050175666809082, \"Time in s\": 8.821636 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9991825294873292, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0453500747680664, \"Time in s\": 10.797386 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992642808345158, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0457162857055664, \"Time in s\": 12.952086 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993311675902924, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0458383560180664, \"Time in s\": 15.282579 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869060652508, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0455942153930664, \"Time in s\": 17.788716 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994340690435768, \"F1\": 0.4615384615384615, \"Memory in Mb\": 0.0458383560180664, \"Time in s\": 20.470769 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474494200668, \"F1\": 0.5, \"Memory in Mb\": 0.0581369400024414, \"Time in s\": 23.327571 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999509529147982, \"F1\": 0.5, \"Memory in Mb\": 0.0581979751586914, \"Time in s\": 26.358996 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999540184583046, \"F1\": 0.5, \"Memory in Mb\": 0.0581979751586914, \"Time in s\": 29.565097 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995672333848532, \"F1\": 0.5, \"Memory in Mb\": 0.0583200454711914, \"Time in s\": 32.946387 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995912766764956, \"F1\": 0.5, \"Memory in Mb\": 0.0583810806274414, \"Time in s\": 36.502912 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996127890253348, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 40.234574 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321500827662, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 44.142312 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496671838246, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 48.227315 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996655917831124, \"F1\": 0.5, \"Memory in Mb\": 0.0584421157836914, \"Time in s\": 52.488216 }, 
{ \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996801316029976, \"F1\": 0.5, \"Memory in Mb\": 0.0585031509399414, \"Time in s\": 56.92422 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996934597446958, \"F1\": 0.5, \"Memory in Mb\": 0.0586252212524414, \"Time in s\": 61.535205 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996636818430236, \"F1\": 0.4666666666666667, \"Memory in Mb\": 0.0678491592407226, \"Time in s\": 66.321748 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996564060068316, \"F1\": 0.4516129032258064, \"Memory in Mb\": 0.0678491592407226, \"Time in s\": 71.282934 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999649669131958, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 76.42026 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999662181183492, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 81.732337 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999673830319284, \"F1\": 0.5, \"Memory in Mb\": 0.0679788589477539, \"Time in s\": 87.220308 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995095377393192, \"F1\": 0.4166666666666666, \"Memory in Mb\": 0.1018075942993164, \"Time in s\": 92.923409 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994914564686738, \"F1\": 0.4000000000000001, \"Memory in Mb\": 0.1022958755493164, \"Time in s\": 98.859355 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995073487150012, \"F1\": 0.4000000000000001, \"Memory in Mb\": 0.1024179458618164, \"Time in s\": 105.024921 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745055575018, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1118478775024414, \"Time in s\": 111.430038 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999489961515278, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1119699478149414, \"Time in s\": 118.06567900000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995045342622064, \"F1\": 0.3773584905660377, \"Memory in Mb\": 0.1120920181274414, \"Time in s\": 124.93220100000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995182974002657, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1121530532836914, \"Time in s\": 132.02968600000003 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9995313165743502, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1122140884399414, \"Time in s\": 139.35861700000004 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995436505192704, \"F1\": 0.4210526315789473, \"Memory in Mb\": 0.1123361587524414, \"Time in s\": 146.91762900000003 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995418777622076, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.1123971939086914, \"Time in s\": 154.70829700000004 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999553330968615, \"F1\": 0.4137931034482758, \"Memory in Mb\": 0.1123971939086914, \"Time in s\": 162.73183200000005 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99953859167927, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1198663711547851, \"Time in s\": 170.98992200000006 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549577729121, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1202325820922851, \"Time in s\": 179.48569900000007 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995600527936648, \"F1\": 0.3999999999999999, \"Memory in Mb\": 0.1203546524047851, \"Time in s\": 188.21505800000008 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995581087052584, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1204767227172851, \"Time in s\": 197.18003100000004 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99956792862648, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1204767227172851, \"Time in s\": 206.38234800000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995773215897278, \"F1\": 0.3934426229508197, \"Memory in Mb\": 0.1205377578735351, \"Time in s\": 215.81852000000003 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995751341681576, \"F1\": 0.3870967741935484, \"Memory in Mb\": 0.1298608779907226, \"Time in s\": 225.49133900000004 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562090143744, \"F1\": 0.375, \"Memory in Mb\": 0.1298608779907226, \"Time in s\": 235.39794000000003 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995388542135856, \"F1\": 0.3582089552238806, \"Memory in Mb\": 0.1371393203735351, \"Time in s\": 245.54396400000005 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995480772262452, \"F1\": 0.3582089552238806, \"Memory in Mb\": 0.1372613906860351, \"Time in s\": 255.92452900000004 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.638095238095238, \"F1\": 0.5777777777777778, \"Memory in Mb\": 0.6364564895629883, \"Time in s\": 0.262298 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7488151658767772, \"F1\": 0.7103825136612022, \"Memory in Mb\": 1.0992326736450195, \"Time in s\": 0.748759 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7917981072555205, \"F1\": 0.7659574468085106, \"Memory in Mb\": 1.488083839416504, \"Time in s\": 1.463509 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8274231678486997, \"F1\": 0.8042895442359249, \"Memory in Mb\": 1.85091495513916, \"Time in s\": 2.4097 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.831758034026465, \"F1\": 0.802660753880266, \"Memory in Mb\": 2.4203081130981445, \"Time in s\": 3.615781 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8456692913385827, \"F1\": 0.8191881918819188, \"Memory in Mb\": 2.83583927154541, \"Time in s\": 5.093309 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8569500674763832, \"F1\": 0.8284789644012946, \"Memory in Mb\": 3.2995615005493164, \"Time in s\": 6.854744 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.859504132231405, \"F1\": 0.8326300984528833, \"Memory in Mb\": 3.4132471084594727, \"Time in s\": 8.933942 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.863588667366212, \"F1\": 0.8370927318295739, \"Memory in Mb\": 3.8167009353637695, \"Time in s\": 11.329061 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8715769593956563, \"F1\": 0.8454545454545456, \"Memory in Mb\": 4.3293962478637695, \"Time in s\": 14.065921 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8738197424892704, \"F1\": 0.8489208633093526, \"Memory in Mb\": 4.776837348937988, \"Time in s\": 17.153408 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8749016522423289, \"F1\": 0.8512628624883068, \"Memory in Mb\": 5.179757118225098, \"Time in s\": 20.492268 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8765432098765432, \"F1\": 0.8516579406631763, \"Memory in Mb\": 5.6712846755981445, \"Time in s\": 24.00032 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8813216453135536, \"F1\": 0.8580645161290322, \"Memory in Mb\": 6.118577003479004, \"Time in s\": 27.676707 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8779106356198867, \"F1\": 0.8554396423248881, \"Memory in Mb\": 
6.625298500061035, \"Time in s\": 31.525227 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.879646017699115, \"F1\": 0.8573426573426574, \"Memory in Mb\": 5.8858842849731445, \"Time in s\": 35.585591 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8800666296501943, \"F1\": 0.8590078328981724, \"Memory in Mb\": 6.279637336730957, \"Time in s\": 39.950222 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8778185631882538, \"F1\": 0.8578401464307503, \"Memory in Mb\": 6.042496681213379, \"Time in s\": 44.519385 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.877297565822156, \"F1\": 0.8584527220630374, \"Memory in Mb\": 6.480931282043457, \"Time in s\": 49.302272 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8801321378008494, \"F1\": 0.8631465517241379, \"Memory in Mb\": 6.779278755187988, \"Time in s\": 54.294834 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8791011235955056, \"F1\": 0.8621219887237315, \"Memory in Mb\": 7.120572090148926, \"Time in s\": 59.4987 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8781638781638782, \"F1\": 0.8611925708699902, \"Memory in Mb\": 7.709580421447754, \"Time in s\": 64.935467 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8789495281083299, \"F1\": 0.8618266978922717, \"Memory in Mb\": 8.127808570861816, \"Time in s\": 70.60280800000001 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8788832088084939, \"F1\": 0.8624999999999999, \"Memory in Mb\": 8.531121253967285, \"Time in s\": 76.48792700000001 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8803322008305021, \"F1\": 0.8645877829987185, \"Memory in Mb\": 8.842900276184082, \"Time in s\": 82.60938300000001 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8827586206896552, \"F1\": 0.8671328671328672, \"Memory in Mb\": 9.237275123596191, \"Time in s\": 88.95420100000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.88325760223698, \"F1\": 0.8674603174603175, \"Memory in Mb\": 9.480591773986816, \"Time in s\": 95.534715 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8843950117964273, \"F1\": 0.8681276432141485, \"Memory in Mb\": 9.87939167022705, \"Time in s\": 102.339589 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8848031239830785, \"F1\": 0.8690828402366864, \"Memory in Mb\": 10.33882999420166, \"Time in s\": 109.378768 }, { 
\"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8867568417741428, \"F1\": 0.8707824838478105, \"Memory in Mb\": 10.641068458557127, \"Time in s\": 116.664225 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8861491628614916, \"F1\": 0.8697771587743733, \"Memory in Mb\": 10.188206672668455, \"Time in s\": 124.173867 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8867590681214981, \"F1\": 0.8712273641851107, \"Memory in Mb\": 10.46657657623291, \"Time in s\": 131.921991 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8881898770374607, \"F1\": 0.8723473718576559, \"Memory in Mb\": 9.652070045471191, \"Time in s\": 139.895805 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889814043852345, \"F1\": 0.8726925525143221, \"Memory in Mb\": 8.887116432189941, \"Time in s\": 148.067873 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889188460501483, \"F1\": 0.873152709359606, \"Memory in Mb\": 9.26492404937744, \"Time in s\": 156.441655 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.89043250327654, \"F1\": 0.875, \"Memory in Mb\": 9.5267915725708, \"Time in s\": 165.011312 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8888038765621015, \"F1\": 0.8728862973760932, \"Memory in Mb\": 9.89724826812744, \"Time in s\": 173.777209 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8872609883287808, \"F1\": 0.8710227272727272, \"Memory in Mb\": 10.25338077545166, \"Time in s\": 182.74826 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8877328816840068, \"F1\": 0.8716104039845047, \"Memory in Mb\": 10.579400062561035, \"Time in s\": 191.919884 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8886529841943854, \"F1\": 0.8727762803234501, \"Memory in Mb\": 10.81624698638916, \"Time in s\": 201.289592 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8895281933256617, \"F1\": 0.8738833420914347, \"Memory in Mb\": 10.986077308654783, \"Time in s\": 210.860402 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8890137047854415, \"F1\": 0.8732032854209446, \"Memory in Mb\": 11.276833534240724, \"Time in s\": 220.647955 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8887425938117183, \"F1\": 0.8734082397003745, \"Memory in Mb\": 11.626667976379396, \"Time in s\": 230.650874 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Adaptive Random 
Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8889127171348917, \"F1\": 0.8740272373540856, \"Memory in Mb\": 12.038758277893066, \"Time in s\": 240.866021 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.887188089746278, \"F1\": 0.871843735111958, \"Memory in Mb\": 12.429780006408691, \"Time in s\": 251.436375 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875897435897436, \"F1\": 0.8720224194301728, \"Memory in Mb\": 12.7014741897583, \"Time in s\": 262.227068 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8883758281469585, \"F1\": 0.8732907930720145, \"Memory in Mb\": 12.836377143859863, \"Time in s\": 273.239924 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875565166109691, \"F1\": 0.8722644037516749, \"Memory in Mb\": 13.209172248840332, \"Time in s\": 284.483402 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8875409204698633, \"F1\": 0.8722100656455142, \"Memory in Mb\": 13.58340549468994, \"Time in s\": 295.957708 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Bananas\", \"Accuracy\": 0.886959803736554, \"F1\": 0.8715419257988419, \"Memory in Mb\": 13.845444679260254, \"Time in s\": 307.673049 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8674033149171271, \"F1\": 0.8669623059866962, \"Memory in Mb\": 3.0924072265625, \"Time in s\": 1.44234 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.895085588072888, \"F1\": 0.8724832214765101, \"Memory in Mb\": 4.682834625244141, \"Time in s\": 4.396686 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8844313581155686, \"F1\": 0.8575317604355717, \"Memory in Mb\": 7.755058288574219, \"Time in s\": 9.151923 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8923544024289263, \"F1\": 0.8677069199457259, \"Memory in Mb\": 8.665351867675781, \"Time in s\": 15.539979 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8940163391477147, \"F1\": 0.8633257403189066, \"Memory in Mb\": 11.046634674072266, \"Time in s\": 23.437977 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8868445262189513, \"F1\": 0.8541617263457434, \"Memory in Mb\": 15.550697326660156, \"Time in s\": 33.146859 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8856647216527361, \"F1\": 0.8543884314119301, \"Memory in Mb\": 17.033367156982422, \"Time in s\": 44.650345 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8840899682627295, \"F1\": 
0.8505869797225187, \"Memory in Mb\": 21.311607360839844, \"Time in s\": 57.88122 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8850729792714338, \"F1\": 0.8592881814086198, \"Memory in Mb\": 20.813556671142575, \"Time in s\": 72.837822 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8881775030356551, \"F1\": 0.866657891272871, \"Memory in Mb\": 22.198707580566406, \"Time in s\": 89.470796 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8867034621174109, \"F1\": 0.8678450193140582, \"Memory in Mb\": 25.867393493652344, \"Time in s\": 108.078283 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887866801582192, \"F1\": 0.8723202027669237, \"Memory in Mb\": 24.029823303222656, \"Time in s\": 128.509921 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8868981913899975, \"F1\": 0.8695141065830722, \"Memory in Mb\": 23.736316680908203, \"Time in s\": 150.696171 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8834660569265946, \"F1\": 0.8662201303403331, \"Memory in Mb\": 17.480976104736328, \"Time in s\": 174.61452699999998 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8837294870851424, \"F1\": 0.8684648684648686, \"Memory in Mb\": 17.212547302246094, \"Time in s\": 200.151461 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826491893756467, \"F1\": 0.8678013522965726, \"Memory in Mb\": 16.54094696044922, \"Time in s\": 227.262884 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836439192260243, \"F1\": 0.8679439941046426, \"Memory in Mb\": 17.876373291015625, \"Time in s\": 255.95351 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8814006254982523, \"F1\": 0.8648119670068503, \"Memory in Mb\": 12.619304656982422, \"Time in s\": 286.224579 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8818334979376053, \"F1\": 0.8629380053908356, \"Memory in Mb\": 9.807907104492188, \"Time in s\": 317.969431 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8821126993763453, \"F1\": 0.8632872503840245, \"Memory in Mb\": 8.873615264892578, \"Time in s\": 351.00359 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.883784494086728, \"F1\": 0.8636279528773206, \"Memory in Mb\": 11.29254150390625, \"Time in s\": 385.315707 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8832973759470172, \"F1\": 0.8638810861423221, \"Memory in Mb\": 13.30521011352539, \"Time in s\": 
421.189813 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826126601718097, \"F1\": 0.8630919064144185, \"Memory in Mb\": 13.75485610961914, \"Time in s\": 458.867106 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8820769902957274, \"F1\": 0.8605005440696408, \"Memory in Mb\": 10.461296081542969, \"Time in s\": 498.082692 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8810985032451764, \"F1\": 0.8581959875730609, \"Memory in Mb\": 10.731792449951172, \"Time in s\": 538.811106 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.880025472298875, \"F1\": 0.8563732465948364, \"Memory in Mb\": 8.147632598876953, \"Time in s\": 581.039793 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8793998610032296, \"F1\": 0.8547799547110366, \"Memory in Mb\": 10.927783966064451, \"Time in s\": 624.8074220000001 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8777545630149407, \"F1\": 0.8528029619784497, \"Memory in Mb\": 11.403583526611328, \"Time in s\": 670.1639380000001 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8783922658242302, \"F1\": 0.8533663775299463, \"Memory in Mb\": 14.942352294921877, \"Time in s\": 717.1095470000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8798704882446006, \"F1\": 0.855843525100446, \"Memory in Mb\": 14.17806625366211, \"Time in s\": 765.6544720000002 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780843866832829, \"F1\": 0.8531732418524872, \"Memory in Mb\": 12.432361602783203, \"Time in s\": 815.7617920000001 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780656065675555, \"F1\": 0.852406997620141, \"Memory in Mb\": 10.199352264404297, \"Time in s\": 867.3023770000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784158945713617, \"F1\": 0.8527684393859614, \"Memory in Mb\": 13.818798065185549, \"Time in s\": 920.296853 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8787455767295393, \"F1\": 0.8524356998933271, \"Memory in Mb\": 16.783336639404297, \"Time in s\": 975.029218 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8776057270806396, \"F1\": 0.8507250278856878, \"Memory in Mb\": 18.14492416381836, \"Time in s\": 1031.673342 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8769277939598344, \"F1\": 0.8501567866208751, \"Memory in Mb\": 18.455127716064453, \"Time in s\": 1090.274581 }, { \"step\": 
33522, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8766743235583664, \"F1\": 0.8503366881471291, \"Memory in Mb\": 21.331356048583984, \"Time in s\": 1150.683688 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.875940395619717, \"F1\": 0.8493102353314751, \"Memory in Mb\": 20.51153945922852, \"Time in s\": 1212.996992 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8751591996150907, \"F1\": 0.8476285882068464, \"Memory in Mb\": 17.70761489868164, \"Time in s\": 1277.0202020000002 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8746930102927785, \"F1\": 0.8461772975170219, \"Memory in Mb\": 18.968151092529297, \"Time in s\": 1342.643234 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8737649750975905, \"F1\": 0.8444930852651478, \"Memory in Mb\": 21.1762809753418, \"Time in s\": 1410.0019350000002 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.874063756537279, \"F1\": 0.8444256866437246, \"Memory in Mb\": 13.57645034790039, \"Time in s\": 1479.1416360000003 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8743999794645378, \"F1\": 0.8453001991842929, \"Memory in Mb\": 13.581947326660156, \"Time in s\": 1549.8962470000004 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8743446303589795, \"F1\": 0.8465990873732889, \"Memory in Mb\": 12.123741149902344, \"Time in s\": 1622.0853150000005 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8747823100885477, \"F1\": 0.8484848484848485, \"Memory in Mb\": 12.675861358642578, \"Time in s\": 1695.6369460000003 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8752969406118776, \"F1\": 0.850217598063233, \"Memory in Mb\": 15.801628112792969, \"Time in s\": 1770.5477020000003 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.87569573283859, \"F1\": 0.8509895554742266, \"Memory in Mb\": 16.52715301513672, \"Time in s\": 1847.0334210000003 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.87568698691563, \"F1\": 0.8510087090728695, \"Memory in Mb\": 18.189510345458984, \"Time in s\": 1925.197809 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8757236501250197, \"F1\": 0.8505242623750305, \"Memory in Mb\": 18.102394104003903, \"Time in s\": 2005.1269950000003 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8766197929314112, \"F1\": 0.8519587847323391, \"Memory in Mb\": 20.35542678833008, \"Time in s\": 2086.7182620000003 }, { 
\"step\": 25, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.625, \"F1\": 0.7096774193548387, \"Memory in Mb\": 0.4235925674438476, \"Time in s\": 0.126705 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7450980392156864, \"Memory in Mb\": 0.6303834915161133, \"Time in s\": 0.333958 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.8403291702270508, \"Time in s\": 0.61983 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.797979797979798, \"F1\": 0.8039215686274509, \"Memory in Mb\": 0.9226388931274414, \"Time in s\": 0.981624 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7903225806451613, \"F1\": 0.7968749999999999, \"Memory in Mb\": 1.0709314346313477, \"Time in s\": 1.421284 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8120805369127517, \"F1\": 0.8227848101265823, \"Memory in Mb\": 1.1753358840942385, \"Time in s\": 1.940335 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8372093023255814, \"Memory in Mb\": 1.2494592666625977, \"Time in s\": 2.543018 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8442211055276382, \"F1\": 0.8426395939086295, \"Memory in Mb\": 1.3681573867797852, \"Time in s\": 3.230103 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8465116279069769, \"Memory in Mb\": 1.4882898330688477, \"Time in s\": 4.000846999999999 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8433734939759037, \"F1\": 0.8354430379746836, \"Memory in Mb\": 1.6624422073364258, \"Time in s\": 4.857951999999999 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.843065693430657, \"F1\": 0.833976833976834, \"Memory in Mb\": 1.7254152297973633, \"Time in s\": 5.796970999999999 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8494983277591973, \"F1\": 0.8375451263537907, \"Memory in Mb\": 1.8179521560668943, \"Time in s\": 6.820338 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8580246913580247, \"F1\": 0.8424657534246577, \"Memory in Mb\": 1.875351905822754, \"Time in s\": 7.930464 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8595988538681948, \"F1\": 0.8414239482200646, \"Memory in Mb\": 2.064530372619629, \"Time in s\": 9.128265 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Adaptive 
Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8582887700534759, \"F1\": 0.8379204892966361, \"Memory in Mb\": 2.210324287414551, \"Time in s\": 10.415761 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646616541353384, \"F1\": 0.8439306358381503, \"Memory in Mb\": 2.3119516372680664, \"Time in s\": 11.792425 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.8435754189944134, \"Memory in Mb\": 2.393784523010254, \"Time in s\": 13.261163 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.8502673796791443, \"Memory in Mb\": 2.504483222961426, \"Time in s\": 14.820437 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8550000000000001, \"Memory in Mb\": 2.601761817932129, \"Time in s\": 16.462295 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8797595190380761, \"F1\": 0.8598130841121494, \"Memory in Mb\": 2.5846261978149414, \"Time in s\": 18.198054000000003 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816793893129771, \"F1\": 0.859090909090909, \"Memory in Mb\": 2.742630958557129, \"Time in s\": 20.024147000000003 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8779599271402551, \"F1\": 0.855291576673866, \"Memory in Mb\": 2.8854761123657227, \"Time in s\": 21.959689000000004 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8763066202090593, \"F1\": 0.8530020703933747, \"Memory in Mb\": 3.0752573013305664, \"Time in s\": 24.00593300000001 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8797996661101837, \"F1\": 0.8548387096774194, \"Memory in Mb\": 3.1360864639282227, \"Time in s\": 26.16192000000001 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8830128205128205, \"F1\": 0.8554455445544554, \"Memory in Mb\": 3.285130500793457, \"Time in s\": 28.42139000000001 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8859784283513097, \"F1\": 0.8609022556390977, \"Memory in Mb\": 3.3397645950317383, \"Time in s\": 30.794885000000008 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887240356083086, \"F1\": 0.8672566371681416, \"Memory in Mb\": 3.5764551162719727, \"Time in s\": 33.284549000000005 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8927038626609443, \"F1\": 0.8704663212435233, \"Memory in Mb\": 3.464848518371582, \"Time in s\": 35.882938 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Adaptive Random 
Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893646408839779, \"F1\": 0.8735632183908045, \"Memory in Mb\": 3.70070743560791, \"Time in s\": 38.601152000000006 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958611481975968, \"F1\": 0.8765822784810127, \"Memory in Mb\": 3.883671760559082, \"Time in s\": 41.43692500000001 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896640826873385, \"F1\": 0.8769230769230768, \"Memory in Mb\": 4.02083683013916, \"Time in s\": 44.39908700000001 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8948685857321652, \"F1\": 0.8761061946902655, \"Memory in Mb\": 4.127713203430176, \"Time in s\": 47.483821000000006 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8944174757281553, \"F1\": 0.8765957446808511, \"Memory in Mb\": 4.256714820861816, \"Time in s\": 50.69677800000001 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8963486454652533, \"F1\": 0.8784530386740332, \"Memory in Mb\": 4.299836158752441, \"Time in s\": 54.03081400000001 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8993135011441648, \"F1\": 0.8814016172506738, \"Memory in Mb\": 4.410748481750488, \"Time in s\": 57.488212 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8987764182424917, \"F1\": 0.8804204993429698, \"Memory in Mb\": 4.566498756408691, \"Time in s\": 61.07896100000001 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9015151515151516, \"F1\": 0.8846641318124209, \"Memory in Mb\": 4.655289649963379, \"Time in s\": 64.810524 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9030558482613276, \"F1\": 0.8878048780487805, \"Memory in Mb\": 4.339470863342285, \"Time in s\": 68.683627 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9034907597535934, \"F1\": 0.8880952380952382, \"Memory in Mb\": 4.029709815979004, \"Time in s\": 72.689378 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9029029029029028, \"F1\": 0.8876013904982619, \"Memory in Mb\": 3.6762208938598633, \"Time in s\": 76.821914 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9013671875, \"F1\": 0.8861330326944759, \"Memory in Mb\": 3.842530250549317, \"Time in s\": 81.06784200000001 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9027645376549094, \"F1\": 0.8881578947368421, \"Memory in Mb\": 3.957364082336426, \"Time in s\": 85.43608100000002 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"Phishing\", \"Accuracy\": 0.9031657355679702, \"F1\": 0.8893617021276596, \"Memory in Mb\": 4.039715766906738, \"Time in s\": 89.92310000000002 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9044585987261148, \"F1\": 0.8911917098445594, \"Memory in Mb\": 4.059922218322754, \"Time in s\": 94.514674 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9065836298932384, \"F1\": 0.8946840521564694, \"Memory in Mb\": 4.122437477111816, \"Time in s\": 99.180878 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9077458659704092, \"F1\": 0.8958742632612966, \"Memory in Mb\": 4.38341236114502, \"Time in s\": 103.92086 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9063032367972744, \"F1\": 0.8940269749518305, \"Memory in Mb\": 3.936264991760254, \"Time in s\": 108.734067 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9074228523769808, \"F1\": 0.8949858088930936, \"Memory in Mb\": 3.502232551574707, \"Time in s\": 113.618405 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9084967320261438, \"F1\": 0.8961038961038962, \"Memory in Mb\": 3.7125635147094727, \"Time in s\": 118.566747 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9087269815852682, \"F1\": 0.8969258589511755, \"Memory in Mb\": 3.826443672180176, \"Time in s\": 123.577759 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1726446151733398, \"Time in s\": 2.131651 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1738653182983398, \"Time in s\": 5.506803 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 10.14235 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 15.888209 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750860214233398, \"Time in s\": 22.677279 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1763067245483398, \"Time in s\": 30.407676 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1763067245483398, \"Time in s\": 39.088803 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9998686198515404, \"F1\": 0.9, \"Memory in Mb\": 0.3923320770263672, \"Time in s\": 48.816854 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832184981898, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4169597625732422, \"Time in s\": 59.770396 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998948972620736, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4169597625732422, \"Time in s\": 71.94360999999999 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904452512899, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 85.33762899999999 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124151521788, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 99.94903 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999191527205108, \"F1\": 0.9166666666666666, \"Memory in Mb\": 0.4181804656982422, \"Time in s\": 115.777799 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249277429526, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4445209503173828, \"Time in s\": 132.832942 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999929932735426, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 151.111146 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343120832924, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 170.611749 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999381761978362, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4446010589599609, \"Time in s\": 191.335277 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416109537852, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4561443328857422, \"Time in s\": 213.283336 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999446841464764, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4642963409423828, \"Time in s\": 236.451824 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474500118236, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4643535614013672, \"Time in s\": 260.85276600000003 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999949952454832, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4643535614013672, \"Time in s\": 286.481847 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522273975876, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, 
\"Time in s\": 313.33932500000003 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999954304514714, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, \"Time in s\": 341.422365 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562085349566, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4655742645263672, \"Time in s\": 370.73064 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999957960230378, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.4830074310302734, \"Time in s\": 401.265116 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595771772742, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.491189956665039, \"Time in s\": 433.028922 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999221486959906, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5249767303466797, \"Time in s\": 466.02568 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249291518872, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5249767303466797, \"Time in s\": 500.253815 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999275178487298, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.5554332733154297, \"Time in s\": 535.714615 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997547688696596, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8414325714111328, \"Time in s\": 572.520543 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762679685381, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8605022430419922, \"Time in s\": 610.6533019999999 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997700960670006, \"F1\": 0.6818181818181819, \"Memory in Mb\": 0.8891468048095703, \"Time in s\": 650.109621 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997452148157584, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.0364971160888672, \"Time in s\": 690.9092899999999 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999752708613468, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.061361312866211, \"Time in s\": 733.0436639999999 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997597741877364, \"F1\": 0.6521739130434783, \"Memory in Mb\": 1.0615253448486328, \"Time in s\": 776.5083099999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997664472243712, \"F1\": 0.68, \"Memory in Mb\": 1.1361942291259766, \"Time in s\": 821.3091039999999 }, { \"step\": 70411, \"track\": 
\"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997727595512002, \"F1\": 0.68, \"Memory in Mb\": 1.136308670043945, \"Time in s\": 867.450083 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997787396457068, \"F1\": 0.68, \"Memory in Mb\": 1.1362667083740234, \"Time in s\": 914.92252 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997844130645684, \"F1\": 0.68, \"Memory in Mb\": 1.1445026397705078, \"Time in s\": 963.73181 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99978980280876, \"F1\": 0.68, \"Memory in Mb\": 1.144460678100586, \"Time in s\": 1013.878015 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997949296352312, \"F1\": 0.68, \"Memory in Mb\": 1.1445598602294922, \"Time in s\": 1065.363214 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997998123240538, \"F1\": 0.68, \"Memory in Mb\": 1.154123306274414, \"Time in s\": 1118.183887 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998044679082956, \"F1\": 0.68, \"Memory in Mb\": 1.1541500091552734, \"Time in s\": 1172.339801 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998089118725442, \"F1\": 0.68, \"Memory in Mb\": 1.1554203033447266, \"Time in s\": 1227.8271129999998 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998131583249644, \"F1\": 0.68, \"Memory in Mb\": 1.1554012298583984, \"Time in s\": 1284.648826 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998172201469092, \"F1\": 0.68, \"Memory in Mb\": 1.1554012298583984, \"Time in s\": 1342.8019989999998 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999798747763864, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.2423763275146484, \"Time in s\": 1402.2973039999997 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029405646848, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.2579975128173828, \"Time in s\": 1463.1308069999998 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998069622289428, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.3168392181396484, \"Time in s\": 1525.3166049999998 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998108230249398, \"F1\": 0.6538461538461539, \"Memory in Mb\": 1.3265628814697266, \"Time in s\": 1588.862311 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5238095238095238, \"F1\": 0.4186046511627906, \"Memory in Mb\": 
0.2278289794921875, \"Time in s\": 0.437833 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5308056872037915, \"F1\": 0.4530386740331491, \"Memory in Mb\": 0.5808591842651367, \"Time in s\": 1.338953 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6025236593059937, \"F1\": 0.5467625899280575, \"Memory in Mb\": 1.0978193283081057, \"Time in s\": 2.727335 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6690307328605201, \"F1\": 0.6236559139784946, \"Memory in Mb\": 1.4776067733764648, \"Time in s\": 4.602884 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7069943289224953, \"F1\": 0.6547884187082404, \"Memory in Mb\": 1.862696647644043, \"Time in s\": 7.038838 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7401574803149606, \"F1\": 0.6983546617915906, \"Memory in Mb\": 2.7163190841674805, \"Time in s\": 10.152007 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7624831309041835, \"F1\": 0.7188498402555912, \"Memory in Mb\": 3.166998863220215, \"Time in s\": 14.047877000000002 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7792207792207793, \"F1\": 0.7399165507649514, \"Memory in Mb\": 3.2281599044799805, \"Time in s\": 18.677316 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7911857292759706, \"F1\": 0.754017305315204, \"Memory in Mb\": 2.101862907409668, \"Time in s\": 23.96263 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8035882908404155, \"F1\": 0.7657657657657657, \"Memory in Mb\": 1.8145971298217771, \"Time in s\": 29.783676 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8077253218884121, \"F1\": 0.7723577235772358, \"Memory in Mb\": 2.202631950378418, \"Time in s\": 35.938969 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8127458693941778, \"F1\": 0.7800369685767098, \"Memory in Mb\": 2.1398725509643555, \"Time in s\": 42.34268 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8191721132897604, \"F1\": 0.7855297157622738, \"Memory in Mb\": 2.487322807312012, \"Time in s\": 48.992986 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8240053944706676, \"F1\": 0.7916999201915402, \"Memory in Mb\": 2.964076042175293, \"Time in s\": 55.892559000000006 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8244178728760226, \"F1\": 0.7934863064396744, \"Memory in Mb\": 3.359782218933105, \"Time in s\": 
63.05187300000001 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8283185840707965, \"F1\": 0.797776233495483, \"Memory in Mb\": 3.637175559997559, \"Time in s\": 70.47404700000001 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8306496390893948, \"F1\": 0.802588996763754, \"Memory in Mb\": 4.031474113464356, \"Time in s\": 78.15954100000002 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8311484006292607, \"F1\": 0.8048484848484848, \"Memory in Mb\": 4.422553062438965, \"Time in s\": 86.11403800000002 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310978638847492, \"F1\": 0.806378132118451, \"Memory in Mb\": 4.790541648864746, \"Time in s\": 94.35073800000002 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8343558282208589, \"F1\": 0.8119978575254418, \"Memory in Mb\": 4.553057670593262, \"Time in s\": 102.86313600000004 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8350561797752809, \"F1\": 0.8126595201633486, \"Memory in Mb\": 4.935397148132324, \"Time in s\": 111.64595000000004 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8361218361218361, \"F1\": 0.8140214216163584, \"Memory in Mb\": 5.269236564636231, \"Time in s\": 120.70485400000004 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8379154698399671, \"F1\": 0.8156789547363509, \"Memory in Mb\": 5.532515525817871, \"Time in s\": 130.039733 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8391663389697208, \"F1\": 0.8178173719376391, \"Memory in Mb\": 5.79874324798584, \"Time in s\": 139.65537500000002 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.840317100792752, \"F1\": 0.81976991904559, \"Memory in Mb\": 5.978323936462402, \"Time in s\": 149.55427200000003 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8442831215970962, \"F1\": 0.8242523555919706, \"Memory in Mb\": 6.180487632751465, \"Time in s\": 159.73621300000002 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.846906675987417, \"F1\": 0.826603325415677, \"Memory in Mb\": 6.367312431335449, \"Time in s\": 170.195916 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490057296932929, \"F1\": 0.828352490421456, \"Memory in Mb\": 6.73636531829834, \"Time in s\": 180.938789 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8503091441588024, \"F1\": 0.8303834808259588, \"Memory in Mb\": 
6.987029075622559, \"Time in s\": 191.964907 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8530984586347908, \"F1\": 0.832796276405299, \"Memory in Mb\": 7.20963191986084, \"Time in s\": 203.271668 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8535768645357686, \"F1\": 0.8329281000347343, \"Memory in Mb\": 7.467520713806152, \"Time in s\": 214.858839 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8554998525508699, \"F1\": 0.8360107095046854, \"Memory in Mb\": 7.765227317810059, \"Time in s\": 226.734099 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8573062625107235, \"F1\": 0.8374063212772891, \"Memory in Mb\": 8.026595115661621, \"Time in s\": 238.898839 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8592839300582847, \"F1\": 0.8388941849380362, \"Memory in Mb\": 8.217352867126465, \"Time in s\": 251.354959 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8603397142086816, \"F1\": 0.8405172413793103, \"Memory in Mb\": 8.39356517791748, \"Time in s\": 264.100164 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8629095674967234, \"F1\": 0.843553694286569, \"Memory in Mb\": 8.53998851776123, \"Time in s\": 277.140513 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8630451415455241, \"F1\": 0.8433945756780402, \"Memory in Mb\": 8.81647777557373, \"Time in s\": 290.481842 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8629252545319096, \"F1\": 0.8431818181818181, \"Memory in Mb\": 9.181269645690918, \"Time in s\": 304.122133 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8637793370433099, \"F1\": 0.8442600276625173, \"Memory in Mb\": 9.408194541931152, \"Time in s\": 318.06952099999995 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8655343241330502, \"F1\": 0.8464439655172414, \"Memory in Mb\": 9.550837516784668, \"Time in s\": 332.31391899999994 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8672036823935558, \"F1\": 0.8484370895718414, \"Memory in Mb\": 9.808123588562012, \"Time in s\": 346.87205299999994 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8669961806335655, \"F1\": 0.8480492813141683, \"Memory in Mb\": 10.045561790466309, \"Time in s\": 361.755126 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8670177748518763, \"F1\": 0.848424212106053, \"Memory in Mb\": 10.332926750183104, 
\"Time in s\": 376.958924 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8674672957323611, \"F1\": 0.8494152046783626, \"Memory in Mb\": 10.668997764587402, \"Time in s\": 392.484642 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.866429020759069, \"F1\": 0.8480076354092102, \"Memory in Mb\": 11.001662254333496, \"Time in s\": 408.344032 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8666666666666667, \"F1\": 0.8477751756440282, \"Memory in Mb\": 11.155909538269045, \"Time in s\": 424.538143 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8678980124472997, \"F1\": 0.8495656149977137, \"Memory in Mb\": 11.33142375946045, \"Time in s\": 441.055719 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.868291724002359, \"F1\": 0.8499776085982983, \"Memory in Mb\": 11.580191612243652, \"Time in s\": 457.901482 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.868476795686501, \"F1\": 0.8501864443957009, \"Memory in Mb\": 11.828421592712402, \"Time in s\": 475.084789 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8690318928099642, \"F1\": 0.850816852966466, \"Memory in Mb\": 12.135478019714355, \"Time in s\": 492.613208 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8839779005524862, \"F1\": 0.8810872027180068, \"Memory in Mb\": 5.370833396911621, \"Time in s\": 5.916109 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9033683048039756, \"F1\": 0.8805460750853241, \"Memory in Mb\": 9.031210899353027, \"Time in s\": 15.079536999999998 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9006256900993743, \"F1\": 0.8771610555050046, \"Memory in Mb\": 13.98334789276123, \"Time in s\": 27.621333 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9014628760695556, \"F1\": 0.8784473953013278, \"Memory in Mb\": 18.162775993347168, \"Time in s\": 43.541312 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9008611172444247, \"F1\": 0.87226173541963, \"Memory in Mb\": 21.49211406707764, \"Time in s\": 63.022082 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8943882244710212, \"F1\": 0.863398381722989, \"Memory in Mb\": 25.749701499938965, \"Time in s\": 86.108323 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8941807285917048, \"F1\": 0.86403242147923, \"Memory in Mb\": 30.03483867645264, \"Time in s\": 112.908333 }, { \"step\": 7248, 
\"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8907133986477163, \"F1\": 0.8586723768736617, \"Memory in Mb\": 31.268176078796387, \"Time in s\": 143.442151 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8912056911566295, \"F1\": 0.8662343537927913, \"Memory in Mb\": 34.28826427459717, \"Time in s\": 177.844322 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8912683519152225, \"F1\": 0.8698639186154049, \"Memory in Mb\": 37.821166038513184, \"Time in s\": 216.191973 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8896136477671851, \"F1\": 0.8707706766917294, \"Memory in Mb\": 41.62779140472412, \"Time in s\": 258.820887 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8902584858798639, \"F1\": 0.8737966783031843, \"Memory in Mb\": 45.202799797058105, \"Time in s\": 305.617005 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8899549970281057, \"F1\": 0.873015873015873, \"Memory in Mb\": 44.63830471038818, \"Time in s\": 356.880829 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842545139162659, \"F1\": 0.8672934369915024, \"Memory in Mb\": 49.475626945495605, \"Time in s\": 412.597623 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.884538965339613, \"F1\": 0.8694566935685165, \"Memory in Mb\": 52.02482509613037, \"Time in s\": 472.748527 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854777509486029, \"F1\": 0.871138022046266, \"Memory in Mb\": 51.378371238708496, \"Time in s\": 537.275724 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8870203233556263, \"F1\": 0.8725461470846764, \"Memory in Mb\": 45.887526512146, \"Time in s\": 605.997132 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8855092904887472, \"F1\": 0.8701398066355985, \"Memory in Mb\": 50.45190334320069, \"Time in s\": 678.971724 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8856097135885668, \"F1\": 0.8680206448153361, \"Memory in Mb\": 42.41952419281006, \"Time in s\": 756.021088 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854241404050996, \"F1\": 0.8677032882997705, \"Memory in Mb\": 44.88027477264404, \"Time in s\": 837.367748 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8873587385019711, \"F1\": 0.8687128591557923, \"Memory in Mb\": 30.162745475769043, \"Time in s\": 922.595636 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": 
\"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.887612262304952, \"F1\": 0.8700243704305443, \"Memory in Mb\": 11.138346672058104, \"Time in s\": 1011.395865 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8870758746460623, \"F1\": 0.8696182190945864, \"Memory in Mb\": 16.41995906829834, \"Time in s\": 1103.9159909999998 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8869981143356482, \"F1\": 0.8677539157112869, \"Memory in Mb\": 20.066325187683105, \"Time in s\": 1200.011486 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854254050951477, \"F1\": 0.8647944563121971, \"Memory in Mb\": 23.948283195495605, \"Time in s\": 1299.755313 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836765018042878, \"F1\": 0.8620481321115698, \"Memory in Mb\": 27.3106107711792, \"Time in s\": 1403.457577 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826294918441601, \"F1\": 0.859903381642512, \"Memory in Mb\": 30.048666954040527, \"Time in s\": 1511.388245 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8818149564394686, \"F1\": 0.8590105342362679, \"Memory in Mb\": 27.528754234313965, \"Time in s\": 1623.579427 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882883568682678, \"F1\": 0.8599899895345134, \"Memory in Mb\": 32.44980525970459, \"Time in s\": 1739.735045 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8840649030501491, \"F1\": 0.8618891080429543, \"Memory in Mb\": 36.050021171569824, \"Time in s\": 1859.93783 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826063735089905, \"F1\": 0.8595287801968386, \"Memory in Mb\": 42.42660045623779, \"Time in s\": 1984.354383 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8825842502845711, \"F1\": 0.8587200132813148, \"Memory in Mb\": 47.772982597351074, \"Time in s\": 2113.006569 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8824296752182493, \"F1\": 0.8583175460518361, \"Memory in Mb\": 47.37248516082764, \"Time in s\": 2246.236218 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8822517287277213, \"F1\": 0.8574348492590699, \"Memory in Mb\": 51.80053234100342, \"Time in s\": 2383.776877 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8812324576618625, \"F1\": 0.8558965332517028, \"Memory in Mb\": 57.62951564788818, \"Time in s\": 2525.746928 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8799018856354438, \"F1\": 0.8544785823085782, \"Memory in Mb\": 57.82863521575928, \"Time in s\": 2672.122785 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8803436651651204, \"F1\": 0.8553604269589989, \"Memory in Mb\": 49.21776485443115, \"Time in s\": 2822.5668949999995 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8800360182414965, \"F1\": 0.8549248278769145, \"Memory in Mb\": 40.49333477020264, \"Time in s\": 2976.721336 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8794045226841762, \"F1\": 0.8535789148139239, \"Memory in Mb\": 46.44182109832764, \"Time in s\": 3134.389793 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8791081431606832, \"F1\": 0.8524468694217102, \"Memory in Mb\": 49.9929723739624, \"Time in s\": 3295.654522 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8779378112801185, \"F1\": 0.8505800158186132, \"Memory in Mb\": 54.79160213470459, \"Time in s\": 3460.6177849999995 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8777693096107855, \"F1\": 0.8499532212794787, \"Memory in Mb\": 58.49489498138428, \"Time in s\": 3629.3978659999993 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784814025720666, \"F1\": 0.8511414376454312, \"Memory in Mb\": 60.34530162811279, \"Time in s\": 3802.324627 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8790106113438527, \"F1\": 0.8529528339278637, \"Memory in Mb\": 65.39763927459717, \"Time in s\": 3979.298316 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8795408275895902, \"F1\": 0.8548286972715718, \"Memory in Mb\": 71.3544225692749, \"Time in s\": 4160.658093999999 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8803359328134374, \"F1\": 0.8566995201287319, \"Memory in Mb\": 58.17950344085693, \"Time in s\": 4346.269614 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8808858411028393, \"F1\": 0.8576000898422146, \"Memory in Mb\": 62.87830638885498, \"Time in s\": 4535.872427 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8812518683744567, \"F1\": 0.8580850829943938, \"Memory in Mb\": 52.03429698944092, \"Time in s\": 4729.486494000001 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8814452729033857, \"F1\": 0.8579065309538595, \"Memory in Mb\": 55.38854122161865, \"Time in s\": 4926.889749000001 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8822490562705578, \"F1\": 0.8591125198098257, \"Memory in Mb\": 58.343642234802246, \"Time in s\": 5128.273015000001 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.75, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.6668167114257812, \"Time in s\": 0.240714 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7959183673469388, \"F1\": 0.7826086956521738, \"Memory in Mb\": 1.0995216369628906, \"Time in s\": 0.6628620000000001 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8378378378378378, \"F1\": 0.8378378378378377, \"Memory in Mb\": 1.2478713989257812, \"Time in s\": 1.252732 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8686868686868687, \"F1\": 0.8686868686868686, \"Memory in Mb\": 1.3291473388671875, \"Time in s\": 2.007362 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8629032258064516, \"F1\": 0.8640000000000001, \"Memory in Mb\": 1.6638565063476562, \"Time in s\": 2.948271 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8657718120805369, \"F1\": 0.8701298701298702, \"Memory in Mb\": 1.6782913208007812, \"Time in s\": 4.062992 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850574712643678, \"F1\": 0.8809523809523809, \"Memory in Mb\": 1.7604293823242188, \"Time in s\": 5.350090000000001 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844221105527639, \"F1\": 0.8795811518324608, \"Memory in Mb\": 1.9450531005859373, \"Time in s\": 6.8260000000000005 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8883928571428571, \"F1\": 0.8803827751196173, \"Memory in Mb\": 2.0522689819335938, \"Time in s\": 8.475057 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795180722891566, \"F1\": 0.8695652173913043, \"Memory in Mb\": 2.253402709960937, \"Time in s\": 10.305544 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795620437956204, \"F1\": 0.8685258964143425, \"Memory in Mb\": 2.2874794006347656, \"Time in s\": 12.343079 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8795986622073578, \"F1\": 0.8666666666666666, \"Memory in Mb\": 2.546089172363281, \"Time in s\": 14.59325 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8796296296296297, \"F1\": 0.8641114982578397, \"Memory in Mb\": 2.7360763549804688, \"Time in s\": 17.040637 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8739255014326648, \"F1\": 0.8562091503267973, \"Memory in Mb\": 2.827232360839844, \"Time in s\": 19.684707 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8743315508021391, \"F1\": 0.8553846153846153, \"Memory in Mb\": 3.0366439819335938, \"Time in s\": 22.544768 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8721804511278195, \"F1\": 0.8513119533527697, \"Memory in Mb\": 3.1284713745117188, \"Time in s\": 25.604048 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875, \"F1\": 0.8515406162464987, \"Memory in Mb\": 3.107044219970703, \"Time in s\": 28.861271 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.851063829787234, \"Memory in Mb\": 3.134662628173828, \"Time in s\": 32.309061 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8557213930348259, \"Memory in Mb\": 3.1429481506347656, \"Time in s\": 35.963049 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8817635270541082, \"F1\": 0.8624708624708626, \"Memory in Mb\": 3.273334503173828, \"Time in s\": 39.811295 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8854961832061069, \"F1\": 0.8642533936651584, \"Memory in Mb\": 3.4039268493652344, \"Time in s\": 43.854378 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8834244080145719, \"F1\": 0.8626609442060086, \"Memory in Mb\": 3.5256080627441406, \"Time in s\": 48.092981 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832752613240418, \"F1\": 0.8618556701030927, \"Memory in Mb\": 3.730621337890625, \"Time in s\": 52.525854 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8864774624373957, \"F1\": 0.8634538152610441, \"Memory in Mb\": 3.6573638916015625, \"Time in s\": 57.153422000000006 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8862179487179487, \"F1\": 0.8605108055009822, \"Memory in Mb\": 3.691375732421875, \"Time in s\": 61.980237 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.889060092449923, \"F1\": 0.8656716417910448, \"Memory in Mb\": 3.879222869873047, \"Time in s\": 67.019405 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887240356083086, \"F1\": 0.8681898066783831, \"Memory in Mb\": 4.001224517822266, \"Time in s\": 72.2172 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8927038626609443, \"F1\": 
0.8713550600343053, \"Memory in Mb\": 4.033683776855469, \"Time in s\": 77.519148 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893646408839779, \"F1\": 0.8743882544861339, \"Memory in Mb\": 4.112117767333984, \"Time in s\": 82.931309 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958611481975968, \"F1\": 0.8773584905660378, \"Memory in Mb\": 4.362846374511719, \"Time in s\": 88.455864 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8953488372093024, \"F1\": 0.8763358778625954, \"Memory in Mb\": 4.623798370361328, \"Time in s\": 94.093371 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8936170212765957, \"F1\": 0.8755490483162518, \"Memory in Mb\": 4.795074462890625, \"Time in s\": 99.843891 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932038834951457, \"F1\": 0.876056338028169, \"Memory in Mb\": 5.260898590087891, \"Time in s\": 105.71761 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8939929328621908, \"F1\": 0.8767123287671234, \"Memory in Mb\": 5.305454254150391, \"Time in s\": 111.707119 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8958810068649885, \"F1\": 0.8781793842034805, \"Memory in Mb\": 5.302814483642578, \"Time in s\": 117.814416 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8976640711902113, \"F1\": 0.8795811518324608, \"Memory in Mb\": 5.489250183105469, \"Time in s\": 124.034882 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9004329004329005, \"F1\": 0.8838383838383839, \"Memory in Mb\": 5.587982177734375, \"Time in s\": 130.365318 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9020021074815596, \"F1\": 0.8869987849331712, \"Memory in Mb\": 5.680080413818359, \"Time in s\": 136.807919 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.904517453798768, \"F1\": 0.889679715302491, \"Memory in Mb\": 5.6697998046875, \"Time in s\": 143.36438 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9049049049049048, \"F1\": 0.8901734104046244, \"Memory in Mb\": 5.689472198486328, \"Time in s\": 150.035681 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9052734375, \"F1\": 0.8911335578002244, \"Memory in Mb\": 5.899868011474609, \"Time in s\": 156.818681 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9065776930409916, \"F1\": 0.8930131004366813, \"Memory in Mb\": 6.014961242675781, 
\"Time in s\": 163.714224 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9068901303538176, \"F1\": 0.8940677966101694, \"Memory in Mb\": 6.185920715332031, \"Time in s\": 170.723809 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.908098271155596, \"F1\": 0.8957688338493291, \"Memory in Mb\": 6.174674987792969, \"Time in s\": 177.84336199999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.909252669039146, \"F1\": 0.8979999999999999, \"Memory in Mb\": 6.282234191894531, \"Time in s\": 185.077801 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9103568320278504, \"F1\": 0.8991185112634672, \"Memory in Mb\": 6.438121795654297, \"Time in s\": 192.421784 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9080068143100511, \"F1\": 0.8963531669865643, \"Memory in Mb\": 6.65753173828125, \"Time in s\": 199.885294 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9090909090909092, \"F1\": 0.8972667295004714, \"Memory in Mb\": 6.8576507568359375, \"Time in s\": 207.46223300000003 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.9101307189542484, \"F1\": 0.898336414048059, \"Memory in Mb\": 6.963230133056641, \"Time in s\": 215.149468 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Phishing\", \"Accuracy\": 0.911128903122498, \"F1\": 0.8999098286744815, \"Memory in Mb\": 7.0904388427734375, \"Time in s\": 222.947789 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.169290542602539, \"Time in s\": 5.29779 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.170511245727539, \"Time in s\": 13.064902 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 23.298967 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 35.999469999999995 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.171731948852539, \"Time in s\": 51.163715 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.172952651977539, \"Time in s\": 68.795189 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 
0.172952651977539, \"Time in s\": 88.896271 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029297773108, \"F1\": 0.8421052631578948, \"Memory in Mb\": 0.457615852355957, \"Time in s\": 111.604078 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248277472848, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4529237747192383, \"Time in s\": 137.211141 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423458931104, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4529237747192383, \"Time in s\": 165.706974 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566787693484, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 197.110825 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999868622728268, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 231.41631 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787290807664, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.4541444778442383, \"Time in s\": 268.619505 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498554859052, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 308.72625800000003 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999859865470852, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 351.754387 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686241665844, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4768953323364258, \"Time in s\": 397.678816 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998763523956724, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847612380981445, \"Time in s\": 446.503909 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998832219075702, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.484715461730957, \"Time in s\": 498.235456 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998893682929528, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.484715461730957, \"Time in s\": 552.8655449999999 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998949000236474, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847383499145508, \"Time in s\": 610.4097629999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998999049096642, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4847383499145508, \"Time in s\": 
670.8597949999998 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999904454795175, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 734.2226509999998 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999908609029428, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 800.4598269999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124170699132, \"F1\": 0.8333333333333333, \"Memory in Mb\": 0.4859590530395508, \"Time in s\": 869.5777599999998 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738806911338, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.5104570388793945, \"Time in s\": 941.5744449999996 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787315318228, \"F1\": 0.7692307692307692, \"Memory in Mb\": 0.5104570388793945, \"Time in s\": 1016.4635609999998 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999883223043986, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.593510627746582, \"Time in s\": 1094.2664539999998 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998873937278306, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.593510627746582, \"Time in s\": 1174.983157 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998912767730946, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.6139116287231445, \"Time in s\": 1258.5948749999998 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997722853789696, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0072031021118164, \"Time in s\": 1345.245196 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997796311364252, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0209360122680664, \"Time in s\": 1434.9545749999995 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997865177765004, \"F1\": 0.6829268292682927, \"Memory in Mb\": 1.0209360122680664, \"Time in s\": 1527.6493129999997 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997611388897736, \"F1\": 0.6511627906976744, \"Memory in Mb\": 1.1647844314575195, \"Time in s\": 1623.3843759999995 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997681643251264, \"F1\": 0.6511627906976744, \"Memory in Mb\": 1.185868263244629, \"Time in s\": 1722.1888749999998 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999774788301003, \"F1\": 0.6511627906976744, \"Memory in Mb\": 
1.195298194885254, \"Time in s\": 1824.040623 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781044272848, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139558792114258, \"Time in s\": 1928.948048 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99978696207925, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139787673950195, \"Time in s\": 2036.917723 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99979256841785, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2139787673950195, \"Time in s\": 2147.927521 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978872480328, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2303056716918943, \"Time in s\": 2261.972689 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029401332124, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2302827835083008, \"Time in s\": 2379.0496510000003 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998077465330292, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2303743362426758, \"Time in s\": 2499.17284 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123240538004, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2315950393676758, \"Time in s\": 2622.406245 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999816688664027, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2398500442504885, \"Time in s\": 2748.681939 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998208548805102, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 2878.007463 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824835929654, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 3010.373781 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998286438877276, \"F1\": 0.6808510638297872, \"Memory in Mb\": 1.2410707473754885, \"Time in s\": 3145.77577 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998211091234348, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.338292121887207, \"Time in s\": 3284.247438 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248360574976, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.3284997940063477, \"Time in s\": 3425.773292 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998284108701714, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.4100160598754885, \"Time in s\": 
3570.387533 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998318426888354, \"F1\": 0.6666666666666667, \"Memory in Mb\": 1.4101762771606443, \"Time in s\": 3718.075789 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7619047619047619, \"F1\": 0.736842105263158, \"Memory in Mb\": 0.041853904724121, \"Time in s\": 0.035102 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8199052132701422, \"F1\": 0.7978723404255319, \"Memory in Mb\": 0.0413503646850585, \"Time in s\": 0.119282 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8391167192429022, \"F1\": 0.8197879858657242, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.252001 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8581560283687943, \"F1\": 0.8412698412698413, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.433555 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8525519848771267, \"F1\": 0.8266666666666667, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 0.663618 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488188976377953, \"F1\": 0.8222222222222222, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 0.942176 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8461538461538461, \"F1\": 0.8155339805825242, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 1.268723 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488783943329398, \"F1\": 0.8217270194986072, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 1.64013 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8541448058761805, \"F1\": 0.8268991282689911, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 2.060089 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8583569405099151, \"F1\": 0.8299319727891157, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 2.5284690000000003 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8549356223175966, \"F1\": 0.8263103802672147, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 3.0446800000000005 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8575924468922108, \"F1\": 0.8309990662931841, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 3.608857 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8576615831517792, \"F1\": 0.8298611111111109, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 4.221075000000001 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8604180714767363, \"F1\": 0.833467417538214, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 4.881183000000001 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8590308370044053, \"F1\": 0.8318318318318318, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 5.5893250000000005 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8613569321533924, \"F1\": 0.8341566690190544, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 6.345487 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8617434758467518, \"F1\": 0.836076366030283, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 7.149425000000001 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8573675930781332, \"F1\": 0.8329238329238329, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 8.001348 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8544461003477397, \"F1\": 0.8317059161401493, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 8.901356 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8551203397829165, \"F1\": 0.8343227199136536, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 9.849324 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8512359550561798, \"F1\": 0.8301693175987686, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 10.845354 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8511368511368511, \"F1\": 0.8304836345872008, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 11.889459 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8518670496512105, \"F1\": 0.8312295465170642, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 12.981528999999998 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8509634290208415, \"F1\": 0.8307280035730238, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 14.121683999999998 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8501321253303133, \"F1\": 0.8307036247334755, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 15.309940999999998 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8500907441016334, \"F1\": 0.8310838445807771, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 16.545870999999998 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8504019573575673, \"F1\": 0.8310970797158642, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 17.828975 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.8513650151668352, \"F1\": 0.8317436093094239, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 19.159858 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490074845427921, \"F1\": 0.8294117647058825, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 20.53851 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8499528153507392, \"F1\": 0.8298251872993221, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 21.964265 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8502283105022831, \"F1\": 0.8295218295218295, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 23.437226 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8493069890887643, \"F1\": 0.8294961628294961, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 24.957963 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8507291964541035, \"F1\": 0.830299089726918, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 26.526512 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.852345267832362, \"F1\": 0.8313253012048194, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 28.143271 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8522512806686439, \"F1\": 0.8315918869084205, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 29.808071 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8521625163826999, \"F1\": 0.8315412186379928, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 31.520677 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8520785513899516, \"F1\": 0.8309037900874635, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 33.281115 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8505090638192203, \"F1\": 0.8290743895513913, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 35.088929 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8499879022501815, \"F1\": 0.8286346047540077, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 36.944224 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.851380042462845, \"F1\": 0.8306451612903226, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 38.847373 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8515535097813579, \"F1\": 0.8308418568056648, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 40.797646 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8508200404403505, \"F1\": 0.8296562339661364, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 42.797836 }, 
{ \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8507790212859337, \"F1\": 0.8302546180728907, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 44.846666000000006 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8496675959682608, \"F1\": 0.8294818778885916, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 46.942725 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.848605577689243, \"F1\": 0.8280133396855646, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 49.086211000000006 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.848, \"F1\": 0.8265855370933771, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 51.27634200000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8490262999397711, \"F1\": 0.8281535648994516, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 53.513903000000006 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.84863377236092, \"F1\": 0.8275089605734768, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 55.79899700000001 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488349701521278, \"F1\": 0.8278131169116035, \"Memory in Mb\": 0.0413770675659179, \"Time in s\": 58.13196400000001 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8484619739573505, \"F1\": 0.8274231678486997, \"Memory in Mb\": 0.0418806076049804, \"Time in s\": 60.51276400000001 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049723756906076, \"F1\": 0.903153153153153, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 0.342387 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9260077305356156, \"F1\": 0.9075862068965518, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 1.0313949999999998 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9120353330879648, \"F1\": 0.8907178783721992, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 2.062314 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9149875793541264, \"F1\": 0.8948087431693988, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 3.434682 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9116802826230956, \"F1\": 0.8866855524079319, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 5.146577 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9098436062557498, \"F1\": 0.8844884488448844, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 7.219511 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9090048888187984, \"F1\": 0.8844382134988984, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 9.664227 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064440458120602, \"F1\": 0.881509961551905, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 12.478866 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9053109284925794, \"F1\": 0.8852556480380499, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 15.66161 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9076056959929352, \"F1\": 0.8903732809430256, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 19.203614 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9092824887104868, \"F1\": 0.8943431510051426, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 23.103248 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9103118388372736, \"F1\": 0.8971193415637859, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 27.356535 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.909484588604908, \"F1\": 0.896323672437269, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 31.965636 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9086178348971063, \"F1\": 0.8953120765965135, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 36.933351 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.908970490838178, \"F1\": 0.8970796239287795, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 42.24853099999999 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9087271472921696, \"F1\": 0.8973861785464982, \"Memory in Mb\": 0.0616054534912109, \"Time in s\": 47.913716 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9094863969872086, \"F1\": 0.8977556109725686, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 53.917574 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9067271723799596, \"F1\": 0.8941323867195656, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 60.251802 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9044907918433742, \"F1\": 0.8901656867985035, \"Memory in Mb\": 0.0621089935302734, \"Time in s\": 66.911875 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9041889729013742, \"F1\": 0.8895674300254454, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 73.89174799999999 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049145860709592, \"F1\": 0.8893239522789844, 
\"Memory in Mb\": 0.0684490203857421, \"Time in s\": 81.18615299999999 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9037679995986152, \"F1\": 0.8889403590040533, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 88.788198 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9006094927292796, \"F1\": 0.8855990719770204, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 96.688698 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8996918548498367, \"F1\": 0.8828112406641234, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 104.88540299999998 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8992891518389333, \"F1\": 0.8815987542174929, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 113.378449 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8977711738484399, \"F1\": 0.8795999999999999, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 122.16799199999998 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8973059155390213, \"F1\": 0.8783652914971914, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 131.253708 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8955729885284031, \"F1\": 0.8765782975352934, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 140.635731 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962052297034979, \"F1\": 0.8771012663932579, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 150.314023 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.896133043894183, \"F1\": 0.8774792760730873, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 160.288049 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8954602100765533, \"F1\": 0.8762330326279403, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 170.561257 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8944500017246731, \"F1\": 0.874518166160912, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 181.130608 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8934006756530756, \"F1\": 0.8730025901574019, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 191.99638700000003 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8926403272408532, \"F1\": 0.8713780094123137, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 203.15792800000003 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8906304203853795, \"F1\": 0.8690233401314299, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 214.615558 }, { 
\"step\": 32616, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8895293576575195, \"F1\": 0.8679010082493126, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 226.368363 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8885773097461293, \"F1\": 0.8667926816220265, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 238.416278 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8875010892613355, \"F1\": 0.8655721772933948, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 250.759651 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88653666544024, \"F1\": 0.8635976999761832, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 263.39898200000005 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8864483015535749, \"F1\": 0.8623423543973505, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 276.33438000000007 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8854219948849105, \"F1\": 0.8608149650075216, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 289.5663660000001 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.885574623531576, \"F1\": 0.8604308244646749, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 303.09499200000005 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8853864517288292, \"F1\": 0.8605515475186608, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 316.91924300000005 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8853071770815042, \"F1\": 0.8614545454545455, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 331.03949900000003 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8846672717015379, \"F1\": 0.8618034328709147, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 345.45792700000004 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8847030593881223, \"F1\": 0.862796607749636, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 360.17251400000004 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8848547474225593, \"F1\": 0.8633767102293309, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 375.183354 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8845632027962379, \"F1\": 0.86316305947773, \"Memory in Mb\": 0.0684490203857421, \"Time in s\": 390.490282 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8843511364404298, \"F1\": 0.8626023657870793, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 406.09386 }, { \"step\": 45300, \"track\": \"Binary classification\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8844345349786971, \"F1\": 0.8629042817860416, \"Memory in Mb\": 0.0689525604248046, \"Time in s\": 421.994249 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7499999999999999, \"Memory in Mb\": 0.0211858749389648, \"Time in s\": 0.008448 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7959183673469388, \"F1\": 0.8076923076923077, \"Memory in Mb\": 0.0379018783569335, \"Time in s\": 0.023055 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8641975308641976, \"Memory in Mb\": 0.0541143417358398, \"Time in s\": 0.045431 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8484848484848485, \"F1\": 0.854368932038835, \"Memory in Mb\": 0.0708036422729492, \"Time in s\": 0.0780319999999999 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.859375, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.1221779999999999 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590604026845637, \"F1\": 0.8679245283018867, \"Memory in Mb\": 0.0707159042358398, \"Time in s\": 0.1780699999999999 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8735632183908046, \"F1\": 0.8735632183908046, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.2457579999999999 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8693467336683417, \"F1\": 0.8686868686868686, \"Memory in Mb\": 0.0707159042358398, \"Time in s\": 0.325372 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8616071428571429, \"F1\": 0.8571428571428571, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.416902 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8416666666666667, \"Memory in Mb\": 0.0712194442749023, \"Time in s\": 0.520874 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8384615384615385, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 0.637346 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8494983277591973, \"F1\": 0.8375451263537907, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 0.76611 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8518518518518519, \"F1\": 0.8356164383561644, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 0.907061 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8538681948424068, 
\"F1\": 0.8349514563106796, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 1.060329 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8271604938271604, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.2260179999999998 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.849624060150376, \"F1\": 0.8224852071005918, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 1.4039159999999995 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8514150943396226, \"F1\": 0.8194842406876792, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.5942239999999996 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8552338530066815, \"F1\": 0.8219178082191781, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 1.7968749999999996 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8481012658227848, \"F1\": 0.8134715025906737, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 2.011871 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8476953907815631, \"F1\": 0.8164251207729469, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.23918 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8492366412213741, \"F1\": 0.8141176470588235, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 2.479037 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8506375227686703, \"F1\": 0.8177777777777777, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.731183 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8519163763066202, \"F1\": 0.8187633262260127, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 2.995642 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8514190317195326, \"F1\": 0.8149688149688149, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 3.272333 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8509615384615384, \"F1\": 0.8105906313645621, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 3.561223 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567026194144838, \"F1\": 0.8208092485549132, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 3.862355 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8590504451038575, \"F1\": 0.8275862068965517, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 4.17585 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640915593705293, \"F1\": 0.831858407079646, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 
4.501572 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8646408839779005, \"F1\": 0.8355704697986577, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 4.839746999999999 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8624833110814419, \"F1\": 0.8341384863123994, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 5.190338999999999 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8591731266149871, \"F1\": 0.8294209702660407, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 5.552793999999999 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8573216520650814, \"F1\": 0.8288288288288288, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 5.927242999999999 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567961165048543, \"F1\": 0.8299711815561961, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 6.313648999999999 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8598351001177856, \"F1\": 0.8330995792426368, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 6.712033999999999 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592677345537757, \"F1\": 0.8312757201646092, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 7.122354999999999 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8587319243604005, \"F1\": 0.8304405874499332, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 7.544645999999999 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8593073593073594, \"F1\": 0.8324742268041236, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 7.978914999999999 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8577449947312961, \"F1\": 0.8327137546468402, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 8.425289 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8613963039014374, \"F1\": 0.8367593712212819, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 8.883884 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8628628628628628, \"F1\": 0.8386336866902238, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 9.354461 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8623046875, \"F1\": 0.8384879725085911, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 9.837197 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.86558627264061, \"F1\": 0.843159065628476, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 10.331855 }, { \"step\": 1075, \"track\": 
\"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8640595903165735, \"F1\": 0.8426724137931035, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 10.838539999999998 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653321201091901, \"F1\": 0.8445378151260504, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 11.357143999999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8674377224199288, \"F1\": 0.8484231943031536, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 11.887570999999998 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8685813751087903, \"F1\": 0.8494516450648055, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 12.429893999999996 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679727427597955, \"F1\": 0.8484848484848486, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 12.984101999999996 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8673894912427023, \"F1\": 0.8478468899521532, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 13.550580999999996 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8676470588235294, \"F1\": 0.848030018761726, \"Memory in Mb\": 0.0709295272827148, \"Time in s\": 14.128897999999996 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670936749399519, \"F1\": 0.847985347985348, \"Memory in Mb\": 0.0714330673217773, \"Time in s\": 14.719141999999996 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 0.843623 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 2.551575 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 5.191815 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 8.760857 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0443115234375, \"Time in s\": 12.963128 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 17.55601 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0438079833984375, \"Time in s\": 22.538869 }, { \"step\": 15224, \"track\": \"Binary classification\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343099257704, \"F1\": 0.9523809523809524, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 27.911978 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416092490948, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 33.678549000000004 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474486310368, \"F1\": 0.96, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 39.834148000000006 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522262564494, \"F1\": 0.96, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 46.380227000000005 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562075760894, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 53.31643100000001 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595763602556, \"F1\": 0.96, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 60.64312500000001 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249277429526, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 68.360031 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999929932735426, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 76.46777800000001 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343120832924, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 84.96788600000001 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999381761978362, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 93.862161 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999416109537852, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 103.14740500000002 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999446841464764, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 112.82379200000004 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999474500118236, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 122.89276800000002 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999949952454832, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 133.353338 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999522273975876, \"F1\": 0.923076923076923, \"Memory in 
Mb\": 0.0438346862792968, \"Time in s\": 144.204522 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999954304514714, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 155.44662499999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999562085349566, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 167.07809799999998 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999957960230378, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 179.099709 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999595771772742, \"F1\": 0.923076923076923, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 191.51086 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999941611521993, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 204.313165 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999436968639152, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 217.506745 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999456383865472, \"F1\": 0.896551724137931, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 231.092317 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423514162098, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 245.069369 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999847436940602, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 259.43782 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998522046145004, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 274.196817 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999824835185834, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 289.350286 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998299871717592, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 304.894594 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998348447540688, \"F1\": 0.7755102040816326, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 320.829102 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248354182784, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 337.153755 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998295696634, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 353.86875 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99983405473428, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 370.973732 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998383097984264, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 388.469718 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99984235210657, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 406.356452 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998461972264232, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 424.633943 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498592430404, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 443.301766 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998533509312216, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 462.36095 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566839044082, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 481.813199 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998598687437232, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 501.656136 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999862915110182, \"F1\": 0.7692307692307693, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 521.891566 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998434704830054, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 542.516877 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998467315503105, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0438346862792968, \"Time in s\": 563.531742 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498595114, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 584.936913 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999852862352731, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.0443382263183593, \"Time in s\": 606.731321 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4857142857142857, \"F1\": 0.4599999999999999, \"Memory in Mb\": 
0.192514419555664, \"Time in s\": 0.128533 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165876777251185, \"F1\": 0.4574468085106383, \"Memory in Mb\": 0.193307876586914, \"Time in s\": 0.37692 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205047318611987, \"F1\": 0.4722222222222222, \"Memory in Mb\": 0.1939868927001953, \"Time in s\": 0.746299 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.4838709677419355, \"Memory in Mb\": 0.1940326690673828, \"Time in s\": 1.240314 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.55765595463138, \"F1\": 0.455813953488372, \"Memory in Mb\": 0.1940555572509765, \"Time in s\": 1.846868 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.4259634888438134, \"Memory in Mb\": 0.194711685180664, \"Time in s\": 2.5750140000000004 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748987854251012, \"F1\": 0.4220183486238532, \"Memory in Mb\": 0.1947574615478515, \"Time in s\": 3.4160590000000006 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5785123966942148, \"F1\": 0.4232633279483037, \"Memory in Mb\": 0.1946887969970703, \"Time in s\": 4.380047 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.1946659088134765, \"Time in s\": 5.464953 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4146341463414634, \"Memory in Mb\": 0.1946659088134765, \"Time in s\": 6.672680000000001 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.4015056461731493, \"Memory in Mb\": 0.1946430206298828, \"Time in s\": 7.997066 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4101382488479262, \"Memory in Mb\": 0.1946430206298828, \"Time in s\": 9.441186 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.3973799126637554, \"Memory in Mb\": 0.1952533721923828, \"Time in s\": 11.005176 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.3926380368098159, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 12.688563 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.3896353166986563, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 14.491668 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, 
\"F1\": 0.3876811594202898, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 16.418884000000002 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6013325930038868, \"F1\": 0.3904923599320882, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 18.478568000000003 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.396812749003984, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 20.667158000000004 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.3961136023916292, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 22.989297000000004 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5969797074091553, \"F1\": 0.3994374120956399, \"Memory in Mb\": 0.1952075958251953, \"Time in s\": 25.441149000000003 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597752808988764, \"F1\": 0.4013377926421405, \"Memory in Mb\": 0.1951618194580078, \"Time in s\": 28.026090000000003 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5988845988845989, \"F1\": 0.4033184428844926, \"Memory in Mb\": 0.1951618194580078, \"Time in s\": 30.739590000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5995075913007797, \"F1\": 0.4019607843137255, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 33.582607 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008651199370821, \"F1\": 0.4088526499708794, \"Memory in Mb\": 0.1951847076416015, \"Time in s\": 36.555510000000005 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002265005662514, \"F1\": 0.4073866815892558, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 39.657922000000006 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4028077753779697, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 42.88800400000001 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599790283117791, \"F1\": 0.4051948051948052, \"Memory in Mb\": 0.1958179473876953, \"Time in s\": 46.243761000000006 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599932591843613, \"F1\": 0.4026170105686965, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 49.729156 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5977871786527823, \"F1\": 0.4023210831721469, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 53.340997 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986159169550173, \"F1\": 0.4042950513538749, \"Memory in Mb\": 0.195840835571289, \"Time in s\": 57.083209 }, { 
\"step\": 3286, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5981735159817352, \"F1\": 0.4021739130434782, \"Memory in Mb\": 0.1913156509399414, \"Time in s\": 60.947023 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5959893836626364, \"F1\": 0.4022687609075043, \"Memory in Mb\": 0.2501306533813476, \"Time in s\": 64.942815 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597369173577352, \"F1\": 0.4023769100169779, \"Memory in Mb\": 0.2948274612426758, \"Time in s\": 69.00059 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008881487649181, \"F1\": 0.4087171052631579, \"Memory in Mb\": 0.3147249221801758, \"Time in s\": 73.119483 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6012402264761392, \"F1\": 0.4086365453818472, \"Memory in Mb\": 0.3621034622192383, \"Time in s\": 77.300669 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6023591087811271, \"F1\": 0.4104158569762923, \"Memory in Mb\": 0.3922090530395508, \"Time in s\": 81.542932 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6052027543993879, \"F1\": 0.4145234493192133, \"Memory in Mb\": 0.4281816482543945, \"Time in s\": 85.85207799999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608393344921778, \"F1\": 0.4195804195804196, \"Memory in Mb\": 0.4565858840942383, \"Time in s\": 90.22589 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6121461408178079, \"F1\": 0.4260651629072682, \"Memory in Mb\": 0.4708681106567383, \"Time in s\": 94.668198 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157112526539278, \"F1\": 0.4329968673860076, \"Memory in Mb\": 0.4722070693969726, \"Time in s\": 99.175385 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6186421173762946, \"F1\": 0.4384954252795662, \"Memory in Mb\": 0.4545450210571289, \"Time in s\": 103.74793499999998 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6212087171422153, \"F1\": 0.4420913302448709, \"Memory in Mb\": 0.4548578262329101, \"Time in s\": 108.38424899999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6214614878209348, \"F1\": 0.4437278297323443, \"Memory in Mb\": 0.4440469741821289, \"Time in s\": 113.085683 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6219172206733863, \"F1\": 0.4454230890217049, \"Memory in Mb\": 0.4133005142211914, \"Time in s\": 117.851137 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6227720696162717, \"F1\": 
0.4449244060475162, \"Memory in Mb\": 0.4420938491821289, \"Time in s\": 122.680723 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6235897435897436, \"F1\": 0.4444444444444444, \"Memory in Mb\": 0.4093866348266601, \"Time in s\": 127.574861 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6251756675366392, \"F1\": 0.4491000295072292, \"Memory in Mb\": 0.4095392227172851, \"Time in s\": 132.5326 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.624139964615687, \"F1\": 0.4467592592592592, \"Memory in Mb\": 0.4096612930297851, \"Time in s\": 137.553125 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6248796456768727, \"F1\": 0.4469051675184554, \"Memory in Mb\": 0.4100580215454101, \"Time in s\": 142.637397 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6259671636157765, \"F1\": 0.4482182628062361, \"Memory in Mb\": 0.4162149429321289, \"Time in s\": 147.78569 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8651933701657458, \"F1\": 0.8685344827586208, \"Memory in Mb\": 1.6044349670410156, \"Time in s\": 1.944742 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8890115958034235, \"F1\": 0.8678500986193294, \"Memory in Mb\": 1.9405479431152344, \"Time in s\": 5.818992 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8778064041221936, \"F1\": 0.8540017590149517, \"Memory in Mb\": 1.825298309326172, \"Time in s\": 10.85362 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8857300579630141, \"F1\": 0.8630952380952381, \"Memory in Mb\": 1.6178092956542969, \"Time in s\": 16.937274 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887171561051005, \"F1\": 0.8605423353624794, \"Memory in Mb\": 2.356822967529297, \"Time in s\": 24.006141 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.884820607175713, \"F1\": 0.8560257589696412, \"Memory in Mb\": 2.6076393127441406, \"Time in s\": 32.019509 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8821952373442674, \"F1\": 0.8529238038984053, \"Memory in Mb\": 2.2810935974121094, \"Time in s\": 40.994451 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8780184904098247, \"F1\": 0.8452380952380951, \"Memory in Mb\": 2.1880455017089844, \"Time in s\": 50.877339 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797988470501655, \"F1\": 0.8548148148148149, \"Memory in Mb\": 2.109683990478516, \"Time in s\": 61.695664 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 
0.8818854178165361, \"F1\": 0.8612191958495461, \"Memory in Mb\": 2.1360397338867188, \"Time in s\": 73.45561000000001 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8771700953336679, \"F1\": 0.8589861751152074, \"Memory in Mb\": 1.9618644714355469, \"Time in s\": 86.17474100000001 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8781160886762948, \"F1\": 0.8612710710920323, \"Memory in Mb\": 1.9593772888183596, \"Time in s\": 99.82607500000002 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8748407913730152, \"F1\": 0.8565868846079004, \"Memory in Mb\": 2.148365020751953, \"Time in s\": 114.35339900000002 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8732161160608689, \"F1\": 0.8548736462093863, \"Memory in Mb\": 1.7726364135742188, \"Time in s\": 129.76244800000003 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8749724041504158, \"F1\": 0.8586404858973291, \"Memory in Mb\": 1.6236610412597656, \"Time in s\": 146.05238800000004 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8746464298033805, \"F1\": 0.858917617827471, \"Memory in Mb\": 1.996196746826172, \"Time in s\": 163.22720400000003 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8757223556911888, \"F1\": 0.859347442680776, \"Memory in Mb\": 1.8728065490722656, \"Time in s\": 181.335909 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8744097626786043, \"F1\": 0.8568432825387949, \"Memory in Mb\": 2.1095123291015625, \"Time in s\": 200.414984 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8735839191308894, \"F1\": 0.8532703978422117, \"Memory in Mb\": 2.2479400634765625, \"Time in s\": 220.47164 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8715712787681439, \"F1\": 0.8510338646693554, \"Memory in Mb\": 2.008777618408203, \"Time in s\": 241.502757 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.872904073587385, \"F1\": 0.8509615384615385, \"Memory in Mb\": 0.9681053161621094, \"Time in s\": 263.516635 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8660378305152777, \"F1\": 0.8433282478582326, \"Memory in Mb\": 0.8690452575683594, \"Time in s\": 286.595649 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8576090608052983, \"F1\": 0.8330050092868801, \"Memory in Mb\": 0.6502227783203125, \"Time in s\": 310.739453 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8574713700961228, \"F1\": 0.8301452452726775, \"Memory in Mb\": 0.7763557434082031, \"Time in s\": 335.928343 }, { \"step\": 22650, \"track\": 
\"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8562850456973817, \"F1\": 0.8269077373039085, \"Memory in Mb\": 1.191143035888672, \"Time in s\": 362.154691 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8503502441095309, \"F1\": 0.8183645076518782, \"Memory in Mb\": 1.1728401184082031, \"Time in s\": 389.483038 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8480438248640694, \"F1\": 0.8142707240293808, \"Memory in Mb\": 1.1973991394042969, \"Time in s\": 417.874563 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443647258248906, \"F1\": 0.8102105566772425, \"Memory in Mb\": 1.3723030090332031, \"Time in s\": 447.337578 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8447074943858714, \"F1\": 0.8100558659217878, \"Memory in Mb\": 1.314678192138672, \"Time in s\": 477.8580079999999 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8453953419919791, \"F1\": 0.8118395128067348, \"Memory in Mb\": 1.0832901000976562, \"Time in s\": 509.408593 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8421221292504896, \"F1\": 0.8068142209829209, \"Memory in Mb\": 1.0510520935058594, \"Time in s\": 541.9996819999999 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.839570901314201, \"F1\": 0.8022113544546035, \"Memory in Mb\": 1.0500526428222656, \"Time in s\": 575.6711059999999 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8373749874569355, \"F1\": 0.7989247311827957, \"Memory in Mb\": 1.017780303955078, \"Time in s\": 610.3966439999999 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8364445021588807, \"F1\": 0.7961149332254148, \"Memory in Mb\": 1.2030601501464844, \"Time in s\": 646.1707399999999 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8328234885994512, \"F1\": 0.7904660263251513, \"Memory in Mb\": 1.1833610534667969, \"Time in s\": 683.0083409999999 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8277479687260463, \"F1\": 0.7829714903809009, \"Memory in Mb\": 0.9862403869628906, \"Time in s\": 720.8979669999999 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8274216163002297, \"F1\": 0.7829675483023824, \"Memory in Mb\": 0.977802276611328, \"Time in s\": 759.8188009999999 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8249339181456415, \"F1\": 0.7797229633419832, \"Memory in Mb\": 1.1589393615722656, \"Time in s\": 799.8066919999999 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8250643873998811, \"F1\": 
0.7787838660033642, \"Memory in Mb\": 1.467632293701172, \"Time in s\": 840.8572599999999 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8262093324870995, \"F1\": 0.7784422711602055, \"Memory in Mb\": 1.2339591979980469, \"Time in s\": 882.9239929999999 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8240409207161126, \"F1\": 0.7737625475943233, \"Memory in Mb\": 1.2780303955078125, \"Time in s\": 926.004901 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8238679666763029, \"F1\": 0.7722731906218145, \"Memory in Mb\": 1.326324462890625, \"Time in s\": 970.056117 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8231383320070847, \"F1\": 0.7715365740433715, \"Memory in Mb\": 1.127643585205078, \"Time in s\": 1015.162858 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8224920352206306, \"F1\": 0.7721975404030647, \"Memory in Mb\": 0.9921760559082032, \"Time in s\": 1061.343491 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8226348451028969, \"F1\": 0.774362654850688, \"Memory in Mb\": 0.6841468811035156, \"Time in s\": 1108.5828599999998 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8231073785242952, \"F1\": 0.7766331353775299, \"Memory in Mb\": 0.6752357482910156, \"Time in s\": 1156.8826949999998 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8236537422794205, \"F1\": 0.7780569266692283, \"Memory in Mb\": 0.8912887573242188, \"Time in s\": 1206.175932 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8242693218663049, \"F1\": 0.7790690951141949, \"Memory in Mb\": 0.8946151733398438, \"Time in s\": 1256.4285579999998 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8235082107539476, \"F1\": 0.7769013924086677, \"Memory in Mb\": 0.9767723083496094, \"Time in s\": 1307.6797829999998 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.823285282235811, \"F1\": 0.7772366773340753, \"Memory in Mb\": 0.7331352233886719, \"Time in s\": 1359.9622719999998 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7083333333333334, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.6845798492431641, \"Time in s\": 0.140915 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.6852588653564453, \"Time in s\": 0.368444 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8493150684931507, \"Memory in Mb\": 0.6852130889892578, \"Time in s\": 0.67832 }, { \"step\": 100, \"track\": \"Binary classification\", 
\"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8585858585858586, \"F1\": 0.8541666666666666, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 1.071639 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.85, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 1.547289 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523489932885906, \"F1\": 0.8533333333333335, \"Memory in Mb\": 0.6858234405517578, \"Time in s\": 2.105688 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8620689655172413, \"F1\": 0.8536585365853658, \"Memory in Mb\": 0.6864566802978516, \"Time in s\": 2.749261 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592964824120602, \"F1\": 0.8510638297872339, \"Memory in Mb\": 0.6865940093994141, \"Time in s\": 3.487233 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8405797101449276, \"Memory in Mb\": 0.7248620986938477, \"Time in s\": 4.314044 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8347826086956521, \"Memory in Mb\": 0.7525568008422852, \"Time in s\": 5.225865 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8333333333333335, \"Memory in Mb\": 0.7526025772094727, \"Time in s\": 6.222143 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8528428093645485, \"F1\": 0.837037037037037, \"Memory in Mb\": 0.7526025772094727, \"Time in s\": 7.300394 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8611111111111112, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.7532129287719727, \"Time in s\": 8.460934 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653295128939829, \"F1\": 0.8438538205980067, \"Memory in Mb\": 0.7532358169555664, \"Time in s\": 9.705145 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8663101604278075, \"F1\": 0.8427672955974843, \"Memory in Mb\": 0.7908792495727539, \"Time in s\": 11.033679 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671679197994987, \"F1\": 0.8417910447761194, \"Memory in Mb\": 0.8290948867797852, \"Time in s\": 12.4564 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.839080459770115, \"Memory in Mb\": 0.8842554092407227, \"Time in s\": 13.968094999999998 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8708240534521158, \"F1\": 0.8406593406593408, \"Memory in Mb\": 0.8843240737915039, \"Time in s\": 15.559030999999996 }, { \"step\": 475, 
\"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.869198312236287, \"F1\": 0.8402061855670103, \"Memory in Mb\": 0.8843927383422852, \"Time in s\": 17.224721999999996 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8413461538461539, \"Memory in Mb\": 0.8844156265258789, \"Time in s\": 18.966559 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8683206106870229, \"F1\": 0.8384074941451991, \"Memory in Mb\": 0.8844156265258789, \"Time in s\": 20.785336 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670309653916212, \"F1\": 0.8381374722838136, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 22.682567 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867595818815331, \"F1\": 0.8382978723404255, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 24.656325 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8697829716193656, \"F1\": 0.8381742738589212, \"Memory in Mb\": 0.8844614028930664, \"Time in s\": 26.722162 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8717948717948718, \"F1\": 0.8373983739837398, \"Memory in Mb\": 0.9222650527954102, \"Time in s\": 28.872863 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8767334360554699, \"F1\": 0.846153846153846, \"Memory in Mb\": 0.9229669570922852, \"Time in s\": 31.097205 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8753709198813057, \"F1\": 0.8478260869565216, \"Memory in Mb\": 0.9505243301391602, \"Time in s\": 33.398985 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798283261802575, \"F1\": 0.8515901060070671, \"Memory in Mb\": 0.8879518508911133, \"Time in s\": 35.778321 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8576214405360134, \"Memory in Mb\": 0.9880342483520508, \"Time in s\": 38.239126 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8865153538050734, \"F1\": 0.8631239935587761, \"Memory in Mb\": 1.0254030227661133, \"Time in s\": 40.785592 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875968992248062, \"F1\": 0.863849765258216, \"Memory in Mb\": 1.0804262161254885, \"Time in s\": 43.415615 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873591989987485, \"F1\": 0.8652694610778443, \"Memory in Mb\": 1.1831789016723633, \"Time in s\": 46.140861 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.8661870503597122, \"Memory in Mb\": 
1.1837968826293943, \"Time in s\": 48.939828 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8671328671328671, \"Memory in Mb\": 1.1943635940551758, \"Time in s\": 51.816843 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8688524590163934, \"Memory in Mb\": 1.2218294143676758, \"Time in s\": 54.767134 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8670212765957446, \"Memory in Mb\": 1.2768526077270508, \"Time in s\": 57.795454 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8896103896103896, \"F1\": 0.8695652173913043, \"Memory in Mb\": 1.2769441604614258, \"Time in s\": 60.902165 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893572181243414, \"F1\": 0.8708487084870848, \"Memory in Mb\": 1.2769899368286133, \"Time in s\": 64.084493 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901437371663244, \"F1\": 0.8718562874251498, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 67.34321899999999 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878878878878879, \"F1\": 0.8697674418604652, \"Memory in Mb\": 1.277012825012207, \"Time in s\": 70.663215 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8876953125, \"F1\": 0.8700564971751412, \"Memory in Mb\": 1.2770586013793943, \"Time in s\": 74.022235 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8894184938036225, \"F1\": 0.8725274725274725, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 77.421815 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901303538175046, \"F1\": 0.8742004264392325, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 80.860683 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89171974522293, \"F1\": 0.8761706555671176, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 84.339469 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932384341637011, \"F1\": 0.8790322580645162, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 87.855325 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8938207136640557, \"F1\": 0.8794466403162056, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 91.408321 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926746166950597, \"F1\": 0.877906976744186, \"Memory in Mb\": 1.2770357131958008, \"Time in s\": 95.001152 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 
0.8932443703085905, \"F1\": 0.8783269961977186, \"Memory in Mb\": 1.2872819900512695, \"Time in s\": 98.634357 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929738562091504, \"F1\": 0.8779123951537745, \"Memory in Mb\": 1.3422365188598633, \"Time in s\": 102.306286 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8935148118494796, \"F1\": 0.8792007266121706, \"Memory in Mb\": 1.3423280715942385, \"Time in s\": 106.015532 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1738767623901367, \"Time in s\": 1.523397 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1744871139526367, \"Time in s\": 4.675035 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750288009643554, \"Time in s\": 8.984261 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750516891479492, \"Time in s\": 14.031006 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1750516891479492, \"Time in s\": 19.82114 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1756620407104492, \"Time in s\": 26.34929 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1756849288940429, \"Time in s\": 33.615204 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999343099257702, \"F1\": 0.1666666666666666, \"Memory in Mb\": 0.4191761016845703, \"Time in s\": 41.690009 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993109891392, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.4156208038330078, \"Time in s\": 50.773074 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999369383572442, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.3918170928955078, \"Time in s\": 60.837183 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994267150773934, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.392496109008789, \"Time in s\": 71.877708 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474490913072, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.3925189971923828, \"Time in s\": 83.900187 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149163230658, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.4046955108642578, \"Time in s\": 96.90004 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", 
\"dataset\": \"SMTP\", \"Accuracy\": 0.9995495664577156, \"F1\": 0.25, \"Memory in Mb\": 0.421091079711914, \"Time in s\": 110.876026 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999579596412556, \"F1\": 0.25, \"Memory in Mb\": 0.4210453033447265, \"Time in s\": 125.82831600000002 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058724997536, \"F1\": 0.25, \"Memory in Mb\": 0.4210681915283203, \"Time in s\": 141.756728 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999629057187017, \"F1\": 0.25, \"Memory in Mb\": 0.433267593383789, \"Time in s\": 158.664658 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496657227104, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 176.542575 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996681048788584, \"F1\": 0.25, \"Memory in Mb\": 0.445535659790039, \"Time in s\": 195.393511 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847000709423, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 215.224785 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997147289926, \"F1\": 0.25, \"Memory in Mb\": 0.4455127716064453, \"Time in s\": 236.028977 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133643855248, \"F1\": 0.25, \"Memory in Mb\": 0.4461231231689453, \"Time in s\": 257.804564 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997258270882836, \"F1\": 0.25, \"Memory in Mb\": 0.4461002349853515, \"Time in s\": 280.547182 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372512097392, \"F1\": 0.25, \"Memory in Mb\": 0.4461002349853515, \"Time in s\": 304.26359 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477613822676, \"F1\": 0.25, \"Memory in Mb\": 0.4643878936767578, \"Time in s\": 328.95641 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574630636458, \"F1\": 0.25, \"Memory in Mb\": 0.4765186309814453, \"Time in s\": 354.62865 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997469832619696, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5477771759033203, \"Time in s\": 381.285293 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756019743633, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5477771759033203, \"Time in s\": 408.929269 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997644330083716, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.5598621368408203, \"Time in s\": 437.552385 }, 
{ \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321533044896, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9146518707275392, \"Time in s\": 467.220958 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996440195280716, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9280223846435548, \"Time in s\": 497.92269099999993 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996551441005008, \"F1\": 0.3225806451612903, \"Memory in Mb\": 0.9402217864990234, \"Time in s\": 529.6450219999999 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996337462976528, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.022481918334961, \"Time in s\": 562.409421 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996445186318604, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.0225505828857422, \"Time in s\": 596.2119009999999 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996546753948712, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.0225963592529297, \"Time in s\": 631.0619619999999 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996642678850336, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.036111831665039, \"Time in s\": 666.9514569999999 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99967334185485, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532817840576172, \"Time in s\": 703.8889889999999 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819382407036, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053213119506836, \"Time in s\": 741.862274 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996900937803168, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532588958740234, \"Time in s\": 780.872663 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996978415375924, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0533504486083984, \"Time in s\": 820.917591 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052113506448, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053327560424805, \"Time in s\": 862.00148 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997122302158272, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0532817840576172, \"Time in s\": 904.126635 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189226181749, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.053327560424805, \"Time in s\": 947.288015 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253108167824, \"F1\": 0.3783783783783784, 
\"Memory in Mb\": 1.053937911987305, \"Time in s\": 991.476835 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997314150921364, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.0626277923583984, \"Time in s\": 1036.7058969999998 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372539611822, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.062604904174805, \"Time in s\": 1082.9660009999998 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731663685152, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0989017486572266, \"Time in s\": 1130.2730419999998 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372540862464, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0989704132080078, \"Time in s\": 1178.6145779999997 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426163052572, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0987415313720703, \"Time in s\": 1227.9921859999995 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477640332532, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.0987186431884766, \"Time in s\": 1278.4102429999998 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5523809523809524, \"F1\": 0.5252525252525252, \"Memory in Mb\": 0.1770515441894531, \"Time in s\": 0.137989 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5829383886255924, \"F1\": 0.5555555555555555, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 0.392857 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6025236593059937, \"F1\": 0.5827814569536425, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 0.757641 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6099290780141844, \"F1\": 0.5758354755784061, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 1.237112 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5841209829867675, \"F1\": 0.5089285714285714, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 1.823779 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748031496062992, \"F1\": 0.4981412639405205, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 2.523282 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.582995951417004, \"F1\": 0.4892561983471074, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 3.326693 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5749704840613932, \"F1\": 0.4812680115273775, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 4.242125 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5760755508919203, 
\"F1\": 0.482051282051282, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 5.269066 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5873465533522191, \"F1\": 0.4828402366863905, \"Memory in Mb\": 0.1771888732910156, \"Time in s\": 6.408509 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5931330472103005, \"F1\": 0.4925053533190577, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 7.660058999999999 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5979543666404405, \"F1\": 0.5034013605442177, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 9.024008 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6005809731299927, \"F1\": 0.4990892531876139, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 10.499633 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6089008766014835, \"F1\": 0.5117845117845117, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 12.08531 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6091881686595343, \"F1\": 0.5121759622937941, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 13.781775 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6135693215339233, \"F1\": 0.5194424064563462, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 15.591862 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6185452526374237, \"F1\": 0.5354969574036511, \"Memory in Mb\": 0.1772346496582031, \"Time in s\": 17.520521 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6208704771893025, \"F1\": 0.5467084639498432, \"Memory in Mb\": 0.1772575378417968, \"Time in s\": 19.574794 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.620963735717834, \"F1\": 0.5561372891215823, \"Memory in Mb\": 0.1772804260253906, \"Time in s\": 21.756325 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6252949504483247, \"F1\": 0.56941431670282, \"Memory in Mb\": 0.1772804260253906, \"Time in s\": 24.064421 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6242696629213483, \"F1\": 0.5721596724667348, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 26.498977 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6229086229086229, \"F1\": 0.5763855421686748, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 29.055623 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.62330734509643, \"F1\": 0.5796703296703297, \"Memory in Mb\": 0.1773033142089843, \"Time in s\": 31.73345 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6244593000393236, \"F1\": 0.5860424794104898, 
\"Memory in Mb\": 0.1773033142089843, \"Time in s\": 34.533392 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6266515666289165, \"F1\": 0.591828312009905, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 37.454912 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6250453720508167, \"F1\": 0.5921831819976313, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 40.501518 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6249563089828731, \"F1\": 0.5927893738140417, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 43.671228 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6248736097067745, \"F1\": 0.5924569754668619, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 46.964135 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6260982753010088, \"F1\": 0.5958494548012664, \"Memory in Mb\": 0.1773490905761718, \"Time in s\": 50.377785 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.62378106322743, \"F1\": 0.5934738273283481, \"Memory in Mb\": 0.1645193099975586, \"Time in s\": 53.91364 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6246575342465753, \"F1\": 0.5937397034596376, \"Memory in Mb\": 0.2085180282592773, \"Time in s\": 57.57642799999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6234149218519611, \"F1\": 0.5931825422108953, \"Memory in Mb\": 0.2435979843139648, \"Time in s\": 61.313442 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6211038032599371, \"F1\": 0.5894019212891229, \"Memory in Mb\": 0.2788152694702148, \"Time in s\": 65.12271799999999 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6194837635303914, \"F1\": 0.5866747060596926, \"Memory in Mb\": 0.3256673812866211, \"Time in s\": 69.00323599999999 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6238878403882449, \"F1\": 0.5915080527086384, \"Memory in Mb\": 0.3337392807006836, \"Time in s\": 72.95431899999998 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6277850589777195, \"F1\": 0.5970488081725313, \"Memory in Mb\": 0.3397665023803711, \"Time in s\": 76.97715599999998 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6322366743177761, \"F1\": 0.6009961261759823, \"Memory in Mb\": 0.3677358627319336, \"Time in s\": 81.06970999999999 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6354606406754407, \"F1\": 0.6034575904916262, \"Memory in Mb\": 0.3820409774780273, \"Time in s\": 85.23008099999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6399709654004355, 
\"F1\": 0.6073878627968339, \"Memory in Mb\": 0.3902044296264648, \"Time in s\": 89.45861099999999 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.644963434772352, \"F1\": 0.6130110568269478, \"Memory in Mb\": 0.3902273178100586, \"Time in s\": 93.754136 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6508630609896433, \"F1\": 0.6185567010309279, \"Memory in Mb\": 0.3902502059936523, \"Time in s\": 98.118583 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6535609975286453, \"F1\": 0.620384047267356, \"Memory in Mb\": 0.3902044296264648, \"Time in s\": 102.55151499999998 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6570111915734036, \"F1\": 0.6243691420331651, \"Memory in Mb\": 0.3903875350952148, \"Time in s\": 107.05130299999998 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6607334334119666, \"F1\": 0.6288127639605818, \"Memory in Mb\": 0.3904333114624023, \"Time in s\": 111.61866799999996 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6630320821975257, \"F1\": 0.6303197607545433, \"Memory in Mb\": 0.4466238021850586, \"Time in s\": 116.25797499999996 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6670769230769231, \"F1\": 0.6330544879041374, \"Memory in Mb\": 0.4547872543334961, \"Time in s\": 120.96504799999995 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6707488456133307, \"F1\": 0.6378091872791519, \"Memory in Mb\": 0.4610433578491211, \"Time in s\": 125.74414799999995 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6734814232356988, \"F1\": 0.6407094959982694, \"Memory in Mb\": 0.4671621322631836, \"Time in s\": 130.59500899999998 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.674369343346813, \"F1\": 0.6412051771695311, \"Memory in Mb\": 0.4671392440795898, \"Time in s\": 135.51973499999997 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6778637478769579, \"F1\": 0.64504054897068, \"Memory in Mb\": 0.4684514999389648, \"Time in s\": 140.51779399999998 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9337016574585636, \"F1\": 0.933184855233853, \"Memory in Mb\": 1.4600162506103516, \"Time in s\": 2.214325 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.946990612921038, \"F1\": 0.9351351351351352, \"Memory in Mb\": 2.0955753326416016, \"Time in s\": 6.240333 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9370629370629372, \"F1\": 0.9227990970654628, \"Memory in Mb\": 2.479440689086914, \"Time in s\": 11.722802 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Elec2\", \"Accuracy\": 0.9343085840463704, \"F1\": 0.9197031039136304, \"Memory in Mb\": 2.829832077026367, \"Time in s\": 18.401477 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9322146169132258, \"F1\": 0.9140778057654632, \"Memory in Mb\": 3.4876842498779297, \"Time in s\": 26.240317 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9262189512419504, \"F1\": 0.9061988304093568, \"Memory in Mb\": 3.730012893676758, \"Time in s\": 35.261976000000004 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.92524838353572, \"F1\": 0.9056904098686828, \"Memory in Mb\": 4.267904281616211, \"Time in s\": 45.380796 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.922174692976404, \"F1\": 0.9017079121645174, \"Memory in Mb\": 4.513330459594727, \"Time in s\": 56.606028 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9222372132957194, \"F1\": 0.906351550960118, \"Memory in Mb\": 4.528413772583008, \"Time in s\": 68.87909400000001 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9222872281708798, \"F1\": 0.9083810515356586, \"Memory in Mb\": 4.871156692504883, \"Time in s\": 82.16100100000001 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9173105870546914, \"F1\": 0.904252846851034, \"Memory in Mb\": 4.923883438110352, \"Time in s\": 96.59034200000002 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9180388188759084, \"F1\": 0.9064370471490076, \"Memory in Mb\": 5.256982803344727, \"Time in s\": 112.03179200000002 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.913645240723444, \"F1\": 0.9013100436681224, \"Memory in Mb\": 5.652528762817383, \"Time in s\": 128.76319300000003 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9114562800599229, \"F1\": 0.8990743237170845, \"Memory in Mb\": 6.184247970581055, \"Time in s\": 146.63877800000003 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.911693281330488, \"F1\": 0.9004810084591143, \"Memory in Mb\": 6.168107986450195, \"Time in s\": 165.54320300000003 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.90845119006554, \"F1\": 0.8974893781382773, \"Memory in Mb\": 6.370748519897461, \"Time in s\": 185.57117500000004 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9089669501980392, \"F1\": 0.8977090325404932, \"Memory in Mb\": 6.689512252807617, \"Time in s\": 206.60419600000003 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.907033789170295, \"F1\": 0.8952315134761576, \"Memory in Mb\": 7.013689041137695, \"Time in s\": 228.760607 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.9056527043513624, \"F1\": 0.8922362309223624, \"Memory in Mb\": 7.149255752563477, \"Time in s\": 252.05960700000003 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9003256250344942, \"F1\": 0.8855368234250223, \"Memory in Mb\": 7.591207504272461, \"Time in s\": 276.72553400000004 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.901287779237845, \"F1\": 0.8854738382729601, \"Memory in Mb\": 7.899053573608398, \"Time in s\": 302.410993 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9013596909337214, \"F1\": 0.8864633864633865, \"Memory in Mb\": 8.218050003051758, \"Time in s\": 329.267044 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8999376109804674, \"F1\": 0.8850669753596825, \"Memory in Mb\": 8.369176864624023, \"Time in s\": 357.490064 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8988640022076071, \"F1\": 0.8821227552934869, \"Memory in Mb\": 8.549039840698242, \"Time in s\": 386.978862 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967283323767054, \"F1\": 0.8787517495205017, \"Memory in Mb\": 8.670183181762695, \"Time in s\": 417.760723 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8951814901294842, \"F1\": 0.8767287433221829, \"Memory in Mb\": 8.96574592590332, \"Time in s\": 449.897837 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8929724868157475, \"F1\": 0.8734287371881647, \"Memory in Mb\": 9.238008499145508, \"Time in s\": 483.328506 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8902905349469784, \"F1\": 0.8701535016096672, \"Memory in Mb\": 9.398244857788086, \"Time in s\": 518.131339 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8890115327522552, \"F1\": 0.8681497558328811, \"Memory in Mb\": 9.500497817993164, \"Time in s\": 554.2859920000001 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8889215938776261, \"F1\": 0.8685734186583084, \"Memory in Mb\": 9.676286697387695, \"Time in s\": 591.703834 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.886629873598006, \"F1\": 0.8655291832080412, \"Memory in Mb\": 9.93019676208496, \"Time in s\": 630.490308 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8864130247318133, \"F1\": 0.8647916238965305, \"Memory in Mb\": 10.322111129760742, \"Time in s\": 670.5577900000001 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.885774492423989, \"F1\": 0.8637977106848004, \"Memory in Mb\": 10.777273178100586, \"Time in s\": 711.8606440000001 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.885498165763075, \"F1\": 0.8626931911083429, \"Memory in Mb\": 10.977670669555664, \"Time in s\": 754.3481340000001 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8835661799489104, \"F1\": 0.8603736479842674, \"Memory in Mb\": 11.286626815795898, \"Time in s\": 798.241067 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882906638049977, \"F1\": 0.8599097611973149, \"Memory in Mb\": 11.60590934753418, \"Time in s\": 843.3988 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8826109006294561, \"F1\": 0.8598995976786413, \"Memory in Mb\": 11.809194564819336, \"Time in s\": 889.816371 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8817788363784239, \"F1\": 0.8588765603328711, \"Memory in Mb\": 11.96577262878418, \"Time in s\": 937.652835 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8808762346814593, \"F1\": 0.8571816361847239, \"Memory in Mb\": 12.22038459777832, \"Time in s\": 986.872877 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797980076712933, \"F1\": 0.8550029958058717, \"Memory in Mb\": 12.516416549682615, \"Time in s\": 1037.336075 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8785839278503164, \"F1\": 0.8529891127192124, \"Memory in Mb\": 12.637868881225586, \"Time in s\": 1089.169624 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8770071745814827, \"F1\": 0.8506891271056662, \"Memory in Mb\": 13.111181259155272, \"Time in s\": 1142.33469 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8756064378673922, \"F1\": 0.8493440278554995, \"Memory in Mb\": 13.190984725952148, \"Time in s\": 1196.840346 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8754233248877406, \"F1\": 0.8501327860936744, \"Memory in Mb\": 13.482259750366213, \"Time in s\": 1252.532055 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8749049522921828, \"F1\": 0.8505713448578962, \"Memory in Mb\": 13.769769668579102, \"Time in s\": 1309.427191 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8752729454109178, \"F1\": 0.8519510111079466, \"Memory in Mb\": 13.892538070678713, \"Time in s\": 1367.440063 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8754139170052371, \"F1\": 0.8523148019264498, \"Memory in Mb\": 14.423887252807615, \"Time in s\": 1426.636647 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8745142226412491, \"F1\": 0.8513605534824177, \"Memory in Mb\": 14.559076309204102, \"Time in s\": 1487.084751 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Elec2\", \"Accuracy\": 0.8743270335413241, \"F1\": 0.8507211088218767, \"Memory in Mb\": 14.714178085327148, \"Time in s\": 1548.853591 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8751186560409722, \"F1\": 0.851922623877706, \"Memory in Mb\": 14.867197036743164, \"Time in s\": 1611.802241 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7142857142857143, \"Memory in Mb\": 0.6709518432617188, \"Time in s\": 0.096996 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7551020408163265, \"F1\": 0.7391304347826088, \"Memory in Mb\": 0.671112060546875, \"Time in s\": 0.227732 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7972972972972973, \"F1\": 0.7945205479452055, \"Memory in Mb\": 0.6711349487304688, \"Time in s\": 0.390548 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.7999999999999999, \"Memory in Mb\": 0.6711578369140625, \"Time in s\": 0.5919840000000001 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8064516129032258, \"F1\": 0.8000000000000002, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 0.8358070000000001 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8211920529801323, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 1.118507 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8313253012048192, \"Memory in Mb\": 0.6711845397949219, \"Time in s\": 1.4342450000000002 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8341708542713567, \"F1\": 0.8253968253968254, \"Memory in Mb\": 0.7092657089233398, \"Time in s\": 1.803702 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8303571428571429, \"F1\": 0.8173076923076923, \"Memory in Mb\": 0.7094945907592773, \"Time in s\": 2.2161530000000003 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8273092369477911, \"F1\": 0.8154506437768241, \"Memory in Mb\": 0.7095174789428711, \"Time in s\": 2.6652750000000003 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8321167883211679, \"F1\": 0.8188976377952757, \"Memory in Mb\": 0.7095861434936523, \"Time in s\": 3.1578190000000004 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.823529411764706, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 3.688734 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.845679012345679, \"F1\": 0.8263888888888888, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 4.255895000000001 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8510028653295129, \"F1\": 0.8289473684210527, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 4.8554520000000005 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8502673796791443, \"F1\": 0.8260869565217391, \"Memory in Mb\": 0.7095823287963867, \"Time in s\": 5.4957590000000005 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.849624060150376, \"F1\": 0.8235294117647061, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 6.1750370000000006 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8271954674220963, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 6.8921660000000005 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8530066815144766, \"F1\": 0.8225806451612903, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 7.648219 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523206751054853, \"F1\": 0.8241206030150755, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 8.452871 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8557114228456913, \"F1\": 0.8317757009345793, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 9.291999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8530534351145038, \"F1\": 0.8253968253968255, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 10.171949 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8579234972677595, \"F1\": 0.832618025751073, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 11.089958 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8588850174216028, \"F1\": 0.8336755646817249, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 12.050434 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8631051752921536, \"F1\": 0.8360000000000001, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 13.044778 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8621794871794872, \"F1\": 0.83203125, \"Memory in Mb\": 0.7096090316772461, \"Time in s\": 14.08632 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8659476117103235, \"F1\": 0.8391866913123845, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 15.163796 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679525222551929, \"F1\": 0.8446771378708552, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 16.281047 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8726752503576538, \"F1\": 0.848381601362862, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 17.426965000000003 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8756906077348067, \"F1\": 0.8543689320388349, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 18.607993000000004 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.87716955941255, \"F1\": 0.8566978193146417, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 19.829159000000004 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785529715762274, \"F1\": 0.8575757575757577, \"Memory in Mb\": 0.7096319198608398, \"Time in s\": 21.096679000000005 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785982478097623, \"F1\": 0.8592162554426704, \"Memory in Mb\": 0.7489309310913086, \"Time in s\": 22.41538000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798543689320388, \"F1\": 0.8619246861924686, \"Memory in Mb\": 0.7852392196655273, \"Time in s\": 23.779028000000007 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8614130434782608, \"Memory in Mb\": 0.7852849960327148, \"Time in s\": 25.18231100000001 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8787185354691075, \"F1\": 0.8594164456233422, \"Memory in Mb\": 0.7853536605834961, \"Time in s\": 26.619257000000005 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8787541713014461, \"F1\": 0.8589909443725743, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 28.105865000000005 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8809523809523809, \"F1\": 0.8628428927680798, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 29.627597000000005 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798735511064278, \"F1\": 0.8629807692307693, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 31.194797000000005 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8819301848049281, \"F1\": 0.8651817116060961, \"Memory in Mb\": 0.7853765487670898, \"Time in s\": 32.795573000000005 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828828828828829, \"F1\": 0.8662857142857143, \"Memory in Mb\": 0.7870550155639648, \"Time in s\": 34.450223 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8828125, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.8609609603881836, \"Time in s\": 36.14349 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8846520495710201, \"F1\": 0.8691891891891892, \"Memory in Mb\": 0.8610067367553711, \"Time in s\": 37.870798 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8836126629422719, \"F1\": 0.8691099476439791, \"Memory in Mb\": 0.8625936508178711, \"Time in s\": 39.648817 }, { 
\"step\": 1100, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8844404003639672, \"F1\": 0.8702757916241062, \"Memory in Mb\": 0.8627080917358398, \"Time in s\": 41.461037000000005 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861209964412812, \"F1\": 0.8732673267326733, \"Memory in Mb\": 0.8989248275756836, \"Time in s\": 43.309543000000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8842471714534378, \"F1\": 0.8707482993197277, \"Memory in Mb\": 0.8989477157592773, \"Time in s\": 45.19996400000001 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8816013628620102, \"F1\": 0.8677450047573739, \"Memory in Mb\": 0.8990621566772461, \"Time in s\": 47.14264000000001 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798999165971643, \"F1\": 0.8654205607476635, \"Memory in Mb\": 0.8990621566772461, \"Time in s\": 49.13049000000001 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.880718954248366, \"F1\": 0.8660550458715598, \"Memory in Mb\": 0.8990850448608398, \"Time in s\": 51.16391500000001 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8783026421136909, \"F1\": 0.8635547576301617, \"Memory in Mb\": 0.8991079330444336, \"Time in s\": 53.25500000000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564722061157226, \"Time in s\": 0.853094 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565408706665039, \"Time in s\": 2.059998 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564950942993164, \"Time in s\": 3.616244 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565179824829101, \"Time in s\": 5.5300910000000005 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565179824829101, \"Time in s\": 7.803705000000001 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1564950942993164, \"Time in s\": 10.437897 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1565637588500976, \"Time in s\": 13.429757000000002 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996715496288512, \"F1\": 0.761904761904762, \"Memory in Mb\": 0.3805198669433594, \"Time in s\": 17.782309 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997080462454748, \"F1\": 0.8, \"Memory in Mb\": 
0.3727455139160156, \"Time in s\": 22.84111 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372431551842, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 28.521633 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997611312822472, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 34.826752 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997810378804468, \"F1\": 0.8, \"Memory in Mb\": 0.3727455139160156, \"Time in s\": 41.750952 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997978818012774, \"F1\": 0.8, \"Memory in Mb\": 0.3649024963378906, \"Time in s\": 49.282837 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998123193573816, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 57.542832 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248318385652, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 66.418254 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998357802082308, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4159393310546875, \"Time in s\": 75.902957 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998454404945905, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.425140380859375, \"Time in s\": 86.000011 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998540273844628, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4250946044921875, \"Time in s\": 96.707721 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999861710366191, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4250030517578125, \"Time in s\": 108.022851 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686250295594, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 119.949225 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998748811370802, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 132.490409 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998805684939688, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.417205810546875, \"Time in s\": 145.647858 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998857612867847, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4172286987304687, \"Time in s\": 159.41857399999998 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998905213373912, \"F1\": 0.8148148148148148, \"Memory in Mb\": 0.4172286987304687, \"Time in s\": 173.80738499999998 }, { \"step\": 47575, \"track\": \"Binary 
classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998738806911338, \"F1\": 0.7857142857142857, \"Memory in Mb\": 0.4640121459960937, \"Time in s\": 189.004291 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787315318228, \"F1\": 0.7857142857142857, \"Memory in Mb\": 0.4561920166015625, \"Time in s\": 204.850148 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998637602179836, \"F1\": 0.787878787878788, \"Memory in Mb\": 0.5220794677734375, \"Time in s\": 221.427526 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686260158024, \"F1\": 0.787878787878788, \"Memory in Mb\": 0.5220794677734375, \"Time in s\": 238.605883 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998550356974596, \"F1\": 0.7647058823529411, \"Memory in Mb\": 0.56781005859375, \"Time in s\": 256.499342 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995445707579392, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.0016555786132812, \"Time in s\": 277.003919 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995592622728504, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.056976318359375, \"Time in s\": 298.475709 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999573035553001, \"F1\": 0.5666666666666667, \"Memory in Mb\": 1.0613327026367188, \"Time in s\": 320.766705 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995382018535622, \"F1\": 0.5538461538461538, \"Memory in Mb\": 1.28204345703125, \"Time in s\": 344.04407 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995517843619108, \"F1\": 0.5538461538461538, \"Memory in Mb\": 1.2822036743164062, \"Time in s\": 368.015979 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549576602006, \"F1\": 0.5454545454545455, \"Memory in Mb\": 1.2944259643554688, \"Time in s\": 392.68268599999993 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999562088545696, \"F1\": 0.5714285714285714, \"Memory in Mb\": 1.2944488525390625, \"Time in s\": 418.04215199999993 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995739241585002, \"F1\": 0.5714285714285714, \"Memory in Mb\": 1.2945404052734375, \"Time in s\": 444.072429 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999571308063557, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.2945404052734375, \"Time in s\": 470.781043 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995823003126012, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.294586181640625, \"Time in s\": 498.253604 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": 
\"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995927429419724, \"F1\": 0.5633802816901408, \"Memory in Mb\": 1.2867202758789062, \"Time in s\": 526.392846 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995898592704622, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2990570068359375, \"Time in s\": 555.302544 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995996246481076, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2990341186523438, \"Time in s\": 584.881396 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996089358165908, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991256713867188, \"Time in s\": 615.117937 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996178237450885, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991943359375, \"Time in s\": 646.0958009999999 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996263166499288, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991714477539062, \"Time in s\": 677.7414719999999 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996344402938186, \"F1\": 0.5555555555555556, \"Memory in Mb\": 1.2991714477539062, \"Time in s\": 710.050307 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996086762075134, \"F1\": 0.5333333333333333, \"Memory in Mb\": 1.4483489990234375, \"Time in s\": 743.2331419999999 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999616828875776, \"F1\": 0.5333333333333333, \"Memory in Mb\": 1.4483261108398438, \"Time in s\": 777.273881 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996139244578855, \"F1\": 0.5263157894736841, \"Memory in Mb\": 1.4622306823730469, \"Time in s\": 812.316683 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"AdaBoost\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996216460498796, \"F1\": 0.5263157894736841, \"Memory in Mb\": 1.4664268493652344, \"Time in s\": 848.066126 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.4857142857142857, \"F1\": 0.4599999999999999, \"Memory in Mb\": 0.2364511489868164, \"Time in s\": 0.160512 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5165876777251185, \"F1\": 0.4574468085106383, \"Memory in Mb\": 0.2372446060180664, \"Time in s\": 0.465512 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5205047318611987, \"F1\": 0.4722222222222222, \"Memory in Mb\": 0.2378625869750976, \"Time in s\": 0.912656 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.4838709677419355, \"Memory in Mb\": 0.2379693984985351, \"Time in s\": 1.508959 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.55765595463138, \"F1\": 0.455813953488372, \"Memory in Mb\": 0.2379922866821289, \"Time in s\": 2.237684 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5543307086614173, \"F1\": 0.4259634888438134, \"Memory in Mb\": 0.2384653091430664, \"Time in s\": 3.113606 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5748987854251012, \"F1\": 0.4220183486238532, \"Memory in Mb\": 0.2386941909790039, \"Time in s\": 4.124442 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5785123966942148, \"F1\": 0.4232633279483037, \"Memory in Mb\": 0.2386255264282226, \"Time in s\": 5.284904 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5844700944386149, \"F1\": 0.4193548387096774, \"Memory in Mb\": 0.2386026382446289, \"Time in s\": 6.590759 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5920679886685553, \"F1\": 0.4146341463414634, \"Memory in Mb\": 0.2383584976196289, \"Time in s\": 8.045558 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.590557939914163, \"F1\": 0.4015056461731493, \"Memory in Mb\": 0.2384576797485351, \"Time in s\": 9.640389 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5971675845790716, \"F1\": 0.4101382488479262, \"Memory in Mb\": 0.2387628555297851, \"Time in s\": 11.37949 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599128540305011, \"F1\": 0.3973799126637554, \"Memory in Mb\": 0.2390680313110351, \"Time in s\": 13.26876 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5994605529332434, \"F1\": 0.3926380368098159, \"Memory in Mb\": 0.2390222549438476, \"Time in s\": 15.320885 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5997482693517936, \"F1\": 0.3896353166986563, \"Memory in Mb\": 0.2389993667602539, \"Time in s\": 17.530882 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6011799410029498, \"F1\": 0.3876811594202898, \"Memory in Mb\": 0.2390604019165039, \"Time in s\": 19.899158 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6013325930038868, \"F1\": 0.3904923599320882, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 22.423363 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6030414263240692, \"F1\": 0.396812749003984, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 25.102833 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986090412319921, \"F1\": 0.3961136023916292, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 27.945051 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5969797074091553, \"F1\": 
0.3994374120956399, \"Memory in Mb\": 0.2390832901000976, \"Time in s\": 30.943527 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597752808988764, \"F1\": 0.4013377926421405, \"Memory in Mb\": 0.2390375137329101, \"Time in s\": 34.101409999999994 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5988845988845989, \"F1\": 0.4033184428844926, \"Memory in Mb\": 0.2390985488891601, \"Time in s\": 37.41135799999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5995075913007797, \"F1\": 0.4019607843137255, \"Memory in Mb\": 0.2391214370727539, \"Time in s\": 40.87347599999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008651199370821, \"F1\": 0.4088526499708794, \"Memory in Mb\": 0.2394876480102539, \"Time in s\": 44.48936199999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6002265005662514, \"F1\": 0.4073866815892558, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 48.25778699999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5985480943738657, \"F1\": 0.4028077753779697, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 52.17665199999999 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599790283117791, \"F1\": 0.4051948051948052, \"Memory in Mb\": 0.2396936416625976, \"Time in s\": 56.248781 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.599932591843613, \"F1\": 0.4026170105686965, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 60.47524299999999 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5977871786527823, \"F1\": 0.4023210831721469, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 64.856117 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5986159169550173, \"F1\": 0.4042950513538749, \"Memory in Mb\": 0.2397165298461914, \"Time in s\": 69.39380299999999 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5981735159817352, \"F1\": 0.4021739130434782, \"Memory in Mb\": 0.23760986328125, \"Time in s\": 74.079456 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5959893836626364, \"F1\": 0.4022687609075043, \"Memory in Mb\": 0.3125686645507812, \"Time in s\": 78.938547 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.597369173577352, \"F1\": 0.4023769100169779, \"Memory in Mb\": 0.3672904968261719, \"Time in s\": 83.975893 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6008881487649181, \"F1\": 0.4087171052631579, \"Memory in Mb\": 0.3972740173339844, \"Time in s\": 89.206915 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 
0.6012402264761392, \"F1\": 0.4086365453818472, \"Memory in Mb\": 0.4523735046386719, \"Time in s\": 94.644851 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6023591087811271, \"F1\": 0.4104158569762923, \"Memory in Mb\": 0.4865531921386719, \"Time in s\": 100.28768 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6052027543993879, \"F1\": 0.4145234493192133, \"Memory in Mb\": 0.5345191955566406, \"Time in s\": 106.160683 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.608393344921778, \"F1\": 0.4195804195804196, \"Memory in Mb\": 0.5653800964355469, \"Time in s\": 112.191076 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6121461408178079, \"F1\": 0.4260651629072682, \"Memory in Mb\": 0.5808448791503906, \"Time in s\": 118.35405 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6157112526539278, \"F1\": 0.4329968673860076, \"Memory in Mb\": 0.5852127075195312, \"Time in s\": 124.637983 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6193325661680092, \"F1\": 0.438560760353021, \"Memory in Mb\": 0.6001129150390625, \"Time in s\": 131.04469 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6218827229835991, \"F1\": 0.4421610871726881, \"Memory in Mb\": 0.6065597534179688, \"Time in s\": 137.573932 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6219003730524468, \"F1\": 0.4429356611703847, \"Memory in Mb\": 0.6456527709960938, \"Time in s\": 144.226405 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.623203945957538, \"F1\": 0.4455664247396655, \"Memory in Mb\": 0.6509552001953125, \"Time in s\": 151.005134 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6250786328370728, \"F1\": 0.446096654275093, \"Memory in Mb\": 0.7009811401367188, \"Time in s\": 157.911799 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6266666666666667, \"F1\": 0.4468085106382978, \"Memory in Mb\": 0.7141494750976562, \"Time in s\": 164.949721 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.629592451314997, \"F1\": 0.4530091906314853, \"Memory in Mb\": 0.73150634765625, \"Time in s\": 172.116039 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6298407705917043, \"F1\": 0.4527753560011624, \"Memory in Mb\": 0.7153549194335938, \"Time in s\": 179.401331 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6321971885230118, \"F1\": 0.456459874786568, \"Memory in Mb\": 0.7158050537109375, \"Time in s\": 186.806087 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6340819022457067, \"F1\": 
0.4594368553108447, \"Memory in Mb\": 0.7224349975585938, \"Time in s\": 194.332292 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8629834254143647, \"F1\": 0.8663793103448276, \"Memory in Mb\": 1.7905378341674805, \"Time in s\": 2.379335 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8884594146880177, \"F1\": 0.8674540682414698, \"Memory in Mb\": 2.571917533874512, \"Time in s\": 6.1243490000000005 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8759661391240339, \"F1\": 0.8536691272253583, \"Memory in Mb\": 2.0127248764038086, \"Time in s\": 11.743707 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8843499861992824, \"F1\": 0.8625778943916038, \"Memory in Mb\": 2.5987844467163086, \"Time in s\": 18.955423 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8869507617575624, \"F1\": 0.8584070796460177, \"Memory in Mb\": 3.065375328063965, \"Time in s\": 27.785827 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833486660533578, \"F1\": 0.8529684601113172, \"Memory in Mb\": 2.4308347702026367, \"Time in s\": 38.240294 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882983756505283, \"F1\": 0.8526023043305523, \"Memory in Mb\": 2.7551145553588867, \"Time in s\": 50.337362 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842279563957499, \"F1\": 0.8536032106089687, \"Memory in Mb\": 2.720797538757324, \"Time in s\": 63.955505 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8842143996075065, \"F1\": 0.8612172890326375, \"Memory in Mb\": 2.593207359313965, \"Time in s\": 79.221878 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8847554917761342, \"F1\": 0.8655678599021375, \"Memory in Mb\": 2.3350706100463867, \"Time in s\": 95.984043 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88509784244857, \"F1\": 0.8683454064619983, \"Memory in Mb\": 2.604697227478028, \"Time in s\": 114.346975 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8848312022812989, \"F1\": 0.8691745036572621, \"Memory in Mb\": 2.73319149017334, \"Time in s\": 134.274448 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.88222807166511, \"F1\": 0.8655877507510417, \"Memory in Mb\": 2.982542991638184, \"Time in s\": 155.860777 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.878498777891666, \"F1\": 0.8616572403267798, \"Memory in Mb\": 2.5146703720092773, \"Time in s\": 179.01547000000002 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8796820958127898, \"F1\": 0.8645738424583782, \"Memory in Mb\": 2.628962516784668, \"Time 
in s\": 203.671512 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8797516384960331, \"F1\": 0.8652701553683234, \"Memory in Mb\": 2.473284721374512, \"Time in s\": 229.977136 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8800077917018375, \"F1\": 0.8645558487247141, \"Memory in Mb\": 2.1737966537475586, \"Time in s\": 257.791658 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8778438707303612, \"F1\": 0.861262014208107, \"Memory in Mb\": 2.5011510848999023, \"Time in s\": 287.339735 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8773601347818509, \"F1\": 0.8583506676508086, \"Memory in Mb\": 2.443587303161621, \"Time in s\": 318.628337 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8774214912522766, \"F1\": 0.8581827469510249, \"Memory in Mb\": 2.8467397689819336, \"Time in s\": 351.823445 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8781603153745072, \"F1\": 0.8573362875430822, \"Memory in Mb\": 2.840205192565918, \"Time in s\": 386.938658 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8755707189804827, \"F1\": 0.8552923328276345, \"Memory in Mb\": 2.7734594345092773, \"Time in s\": 424.419923 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8726784086000864, \"F1\": 0.8517959890508909, \"Memory in Mb\": 3.382327079772949, \"Time in s\": 464.42461 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8735225129926873, \"F1\": 0.8505921981962403, \"Memory in Mb\": 2.277277946472168, \"Time in s\": 506.250356 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8713850501125877, \"F1\": 0.8461741564133707, \"Memory in Mb\": 2.6045217514038086, \"Time in s\": 549.67693 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8655062619401401, \"F1\": 0.8378212347701444, \"Memory in Mb\": 2.0687971115112305, \"Time in s\": 595.047504 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8629246555741793, \"F1\": 0.8334740501614105, \"Memory in Mb\": 1.965815544128418, \"Time in s\": 642.194018 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8587929199353491, \"F1\": 0.8286944045911048, \"Memory in Mb\": 1.5182180404663086, \"Time in s\": 691.397121 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8580672172953222, \"F1\": 0.8274010645683869, \"Memory in Mb\": 1.035365104675293, \"Time in s\": 742.026028 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8578682070716361, \"F1\": 0.8276446705037256, \"Memory in Mb\": 1.274672508239746, \"Time in s\": 793.972348 }, { \"step\": 28086, \"track\": 
\"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8549403596225743, \"F1\": 0.8230542043085476, \"Memory in Mb\": 1.6423864364624023, \"Time in s\": 847.62915 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8532648063192025, \"F1\": 0.8196846388606307, \"Memory in Mb\": 2.145480155944824, \"Time in s\": 903.499383 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8507208081078369, \"F1\": 0.8162391402808086, \"Memory in Mb\": 1.650496482849121, \"Time in s\": 961.52369 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8489108203746388, \"F1\": 0.812746439204957, \"Memory in Mb\": 1.6065664291381836, \"Time in s\": 1021.4058 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8464789176574474, \"F1\": 0.8088731841382018, \"Memory in Mb\": 1.891444206237793, \"Time in s\": 1083.110109 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443660892227502, \"F1\": 0.8061855670103092, \"Memory in Mb\": 1.5838193893432615, \"Time in s\": 1146.603723 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8444258822827482, \"F1\": 0.8069876753395758, \"Memory in Mb\": 2.1811208724975586, \"Time in s\": 1211.933286 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8422168646701716, \"F1\": 0.8034590057167668, \"Memory in Mb\": 2.362921714782715, \"Time in s\": 1279.642459 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8416777516769026, \"F1\": 0.8019121813031161, \"Memory in Mb\": 2.295699119567871, \"Time in s\": 1349.6385269999998 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8424625403570739, \"F1\": 0.8013915463558879, \"Memory in Mb\": 2.210890769958496, \"Time in s\": 1421.6820649999995 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8423206353479606, \"F1\": 0.8005720317341414, \"Memory in Mb\": 1.5720243453979492, \"Time in s\": 1496.1115339999997 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8416335970145331, \"F1\": 0.7984750183934186, \"Memory in Mb\": 1.657557487487793, \"Time in s\": 1572.0838559999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8402854429242498, \"F1\": 0.7969321148825065, \"Memory in Mb\": 1.542536735534668, \"Time in s\": 1649.6649279999997 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8404786393397387, \"F1\": 0.7989503303929938, \"Memory in Mb\": 1.814925193786621, \"Time in s\": 1728.6718509999996 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419141995143369, \"F1\": 0.8025852298832972, \"Memory in Mb\": 1.6994237899780271, \"Time in s\": 1809.157296 }, { \"step\": 41676, \"track\": 
\"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8428554289142172, \"F1\": 0.8053036834438267, \"Memory in Mb\": 2.179030418395996, \"Time in s\": 1890.982895 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8435217585308001, \"F1\": 0.8066061010652193, \"Memory in Mb\": 2.626959800720215, \"Time in s\": 1974.3648729999995 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8437004162163405, \"F1\": 0.8069418013463233, \"Memory in Mb\": 2.851019859313965, \"Time in s\": 2059.407496 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8424751650034915, \"F1\": 0.804298547561078, \"Memory in Mb\": 3.612540245056152, \"Time in s\": 2146.572713 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8419391156537672, \"F1\": 0.8040932472365109, \"Memory in Mb\": 3.2033262252807617, \"Time in s\": 2236.8483089999995 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7083333333333334, \"F1\": 0.7407407407407408, \"Memory in Mb\": 0.7285165786743164, \"Time in s\": 0.081182 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8085106382978724, \"Memory in Mb\": 0.7291955947875977, \"Time in s\": 0.246708 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8513513513513513, \"F1\": 0.8493150684931507, \"Memory in Mb\": 0.7295160293579102, \"Time in s\": 0.489882 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8585858585858586, \"F1\": 0.8541666666666666, \"Memory in Mb\": 0.7297601699829102, \"Time in s\": 0.81137 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8548387096774194, \"F1\": 0.85, \"Memory in Mb\": 0.7297601699829102, \"Time in s\": 1.210797 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8523489932885906, \"F1\": 0.8533333333333335, \"Memory in Mb\": 0.7300043106079102, \"Time in s\": 1.687212 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8620689655172413, \"F1\": 0.8536585365853658, \"Memory in Mb\": 0.7303934097290039, \"Time in s\": 2.243398 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8592964824120602, \"F1\": 0.8510638297872339, \"Memory in Mb\": 0.7305307388305664, \"Time in s\": 2.888269 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8526785714285714, \"F1\": 0.8405797101449276, \"Memory in Mb\": 0.77154541015625, \"Time in s\": 3.617105000000001 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8473895582329317, \"F1\": 0.8347826086956521, \"Memory in Mb\": 0.7995452880859375, \"Time in s\": 4.427161000000001 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": 
\"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467153284671532, \"F1\": 0.8333333333333335, \"Memory in Mb\": 0.799774169921875, \"Time in s\": 5.319298000000001 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8528428093645485, \"F1\": 0.837037037037037, \"Memory in Mb\": 0.799957275390625, \"Time in s\": 6.288776 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8611111111111112, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.800323486328125, \"Time in s\": 7.336677 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8653295128939829, \"F1\": 0.8438538205980067, \"Memory in Mb\": 0.8004684448242188, \"Time in s\": 8.465301 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8663101604278075, \"F1\": 0.8427672955974843, \"Memory in Mb\": 0.8407363891601562, \"Time in s\": 9.675054 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8671679197994987, \"F1\": 0.8417910447761194, \"Memory in Mb\": 0.8817596435546875, \"Time in s\": 10.973937 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8679245283018868, \"F1\": 0.839080459770115, \"Memory in Mb\": 0.937408447265625, \"Time in s\": 12.361793 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8708240534521158, \"F1\": 0.8406593406593408, \"Memory in Mb\": 0.9376602172851562, \"Time in s\": 13.826239 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.869198312236287, \"F1\": 0.8402061855670103, \"Memory in Mb\": 0.9379119873046876, \"Time in s\": 15.36192 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8413461538461539, \"Memory in Mb\": 0.9381179809570312, \"Time in s\": 16.969597999999998 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8683206106870229, \"F1\": 0.8384074941451991, \"Memory in Mb\": 0.9381790161132812, \"Time in s\": 18.650676 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8670309653916212, \"F1\": 0.8381374722838136, \"Memory in Mb\": 0.9382858276367188, \"Time in s\": 20.413452 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867595818815331, \"F1\": 0.8382978723404255, \"Memory in Mb\": 0.9383468627929688, \"Time in s\": 22.261479999999995 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8697829716193656, \"F1\": 0.8381742738589212, \"Memory in Mb\": 0.9384689331054688, \"Time in s\": 24.204291999999995 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8717948717948718, \"F1\": 0.8373983739837398, \"Memory in Mb\": 0.9792633056640624, \"Time in s\": 26.239624 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Bagging\", 
\"dataset\": \"Phishing\", \"Accuracy\": 0.8767334360554699, \"F1\": 0.846153846153846, \"Memory in Mb\": 0.9797210693359376, \"Time in s\": 28.350959 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8753709198813057, \"F1\": 0.8478260869565216, \"Memory in Mb\": 1.0075225830078125, \"Time in s\": 30.546933 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798283261802575, \"F1\": 0.8515901060070671, \"Memory in Mb\": 0.9475822448730468, \"Time in s\": 32.825613999999995 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8576214405360134, \"Memory in Mb\": 1.0479621887207031, \"Time in s\": 35.192032999999995 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8865153538050734, \"F1\": 0.8631239935587761, \"Memory in Mb\": 1.0882606506347656, \"Time in s\": 37.652957 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8875968992248062, \"F1\": 0.863849765258216, \"Memory in Mb\": 1.1435890197753906, \"Time in s\": 40.206814 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873591989987485, \"F1\": 0.8652694610778443, \"Memory in Mb\": 1.2515907287597656, \"Time in s\": 42.859537 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.8661870503597122, \"Memory in Mb\": 1.2530021667480469, \"Time in s\": 45.593545 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8671328671328671, \"Memory in Mb\": 1.266559600830078, \"Time in s\": 48.414666 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8688524590163934, \"Memory in Mb\": 1.294574737548828, \"Time in s\": 51.318085 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8670212765957446, \"Memory in Mb\": 1.3499031066894531, \"Time in s\": 54.308453 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8896103896103896, \"F1\": 0.8695652173913043, \"Memory in Mb\": 1.350116729736328, \"Time in s\": 57.385236 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8893572181243414, \"F1\": 0.8708487084870848, \"Memory in Mb\": 1.3506507873535156, \"Time in s\": 60.544707 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901437371663244, \"F1\": 0.8718562874251498, \"Memory in Mb\": 1.3507575988769531, \"Time in s\": 63.787028 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8878878878878879, \"F1\": 0.8697674418604652, \"Memory in Mb\": 1.3509178161621094, \"Time in s\": 67.116503 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.8876953125, \"F1\": 0.8700564971751412, \"Memory in Mb\": 1.3512077331542969, \"Time in s\": 70.53228 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8894184938036225, \"F1\": 0.8725274725274725, \"Memory in Mb\": 1.3513069152832031, \"Time in s\": 74.046134 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901303538175046, \"F1\": 0.8742004264392325, \"Memory in Mb\": 1.3514289855957031, \"Time in s\": 77.61240799999999 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.89171974522293, \"F1\": 0.8761706555671176, \"Memory in Mb\": 1.3517951965332031, \"Time in s\": 81.22848299999998 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932384341637011, \"F1\": 0.8790322580645162, \"Memory in Mb\": 1.3518562316894531, \"Time in s\": 84.88876699999999 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8938207136640557, \"F1\": 0.8794466403162056, \"Memory in Mb\": 1.3518562316894531, \"Time in s\": 88.60766799999999 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926746166950597, \"F1\": 0.877906976744186, \"Memory in Mb\": 1.3519172668457031, \"Time in s\": 92.36705899999998 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932443703085905, \"F1\": 0.8783269961977186, \"Memory in Mb\": 1.365093231201172, \"Time in s\": 96.17394699999998 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929738562091504, \"F1\": 0.8779123951537745, \"Memory in Mb\": 1.4202919006347656, \"Time in s\": 100.0275 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8935148118494796, \"F1\": 0.8792007266121706, \"Memory in Mb\": 1.4205055236816406, \"Time in s\": 103.925929 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.217813491821289, \"Time in s\": 1.823771 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.218423843383789, \"Time in s\": 5.582317 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189655303955078, \"Time in s\": 10.437191 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189884185791015, \"Time in s\": 16.255529 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2189884185791015, \"Time in s\": 22.995497 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2195987701416015, \"Time in s\": 30.599323 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": 
\"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2196216583251953, \"Time in s\": 39.036327 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999343099257702, \"F1\": 0.1666666666666666, \"Memory in Mb\": 0.5055475234985352, \"Time in s\": 48.440921 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993109891392, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5140314102172852, \"Time in s\": 59.356571 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999369383572442, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5010766983032227, \"Time in s\": 71.78490500000001 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994267150773934, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5033426284790039, \"Time in s\": 85.737205 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999474490913072, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5038537979125977, \"Time in s\": 101.221325 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149163230658, \"F1\": 0.1428571428571428, \"Memory in Mb\": 0.5166254043579102, \"Time in s\": 118.224146 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995495664577156, \"F1\": 0.25, \"Memory in Mb\": 0.5370950698852539, \"Time in s\": 136.728077 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999579596412556, \"F1\": 0.25, \"Memory in Mb\": 0.5375986099243164, \"Time in s\": 156.75763500000002 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058724997536, \"F1\": 0.25, \"Memory in Mb\": 0.5378046035766602, \"Time in s\": 178.30216700000005 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999629057187017, \"F1\": 0.25, \"Memory in Mb\": 0.5502328872680664, \"Time in s\": 201.36650900000004 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496657227104, \"F1\": 0.25, \"Memory in Mb\": 0.5628290176391602, \"Time in s\": 225.93287100000003 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996681048788584, \"F1\": 0.25, \"Memory in Mb\": 0.5643777847290039, \"Time in s\": 252.01061500000003 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847000709423, \"F1\": 0.25, \"Memory in Mb\": 0.5652093887329102, \"Time in s\": 279.629747 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996997147289926, \"F1\": 0.25, \"Memory in Mb\": 0.5653314590454102, \"Time in s\": 308.76820100000003 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133643855248, \"F1\": 0.25, \"Memory in Mb\": 0.5653314590454102, \"Time in s\": 339.41859300000004 
}, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997258270882836, \"F1\": 0.25, \"Memory in Mb\": 0.5653085708618164, \"Time in s\": 371.57202200000006 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372512097392, \"F1\": 0.25, \"Memory in Mb\": 0.5653696060180664, \"Time in s\": 405.2428510000001 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477613822676, \"F1\": 0.25, \"Memory in Mb\": 0.5838403701782227, \"Time in s\": 440.4367680000001 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574630636458, \"F1\": 0.25, \"Memory in Mb\": 0.5960397720336914, \"Time in s\": 477.1431370000001 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997469832619696, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6703958511352539, \"Time in s\": 515.395289 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756019743633, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6706399917602539, \"Time in s\": 555.1871910000001 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997644330083716, \"F1\": 0.3157894736842105, \"Memory in Mb\": 0.6832361221313477, \"Time in s\": 596.5168480000001 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996321533044896, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.1210947036743164, \"Time in s\": 639.6973030000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996440195280716, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.145817756652832, \"Time in s\": 684.8963280000002 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996551441005008, \"F1\": 0.3225806451612903, \"Memory in Mb\": 1.1608476638793943, \"Time in s\": 732.0726040000002 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996337462976528, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.2456941604614258, \"Time in s\": 781.2786070000002 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996445186318604, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.246922492980957, \"Time in s\": 832.4886640000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996546753948712, \"F1\": 0.303030303030303, \"Memory in Mb\": 1.2487382888793943, \"Time in s\": 885.7141210000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996642678850336, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.266160011291504, \"Time in s\": 940.9452050000002 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99967334185485, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.296757698059082, \"Time in s\": 998.2257750000002 }, { \"step\": 72314, 
\"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996819382407036, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.2987031936645508, \"Time in s\": 1057.5572380000003 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996900937803168, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.2991762161254885, \"Time in s\": 1118.9337870000004 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996978415375924, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3004274368286133, \"Time in s\": 1182.3536270000004 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997052113506448, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3022356033325195, \"Time in s\": 1247.8338970000004 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997122302158272, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.302800178527832, \"Time in s\": 1315.3680150000005 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997189226181749, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3035783767700195, \"Time in s\": 1384.9414330000004 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997253108167824, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3040666580200195, \"Time in s\": 1456.5304290000004 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997314150921364, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3194093704223633, \"Time in s\": 1530.1954770000004 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372539611822, \"F1\": 0.3783783783783784, \"Memory in Mb\": 1.3199357986450195, \"Time in s\": 1605.9064890000004 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999731663685152, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3624582290649414, \"Time in s\": 1683.7024010000005 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372540862464, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3635034561157229, \"Time in s\": 1763.5593190000004 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997426163052572, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3640680313110352, \"Time in s\": 1845.4940930000005 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997477640332532, \"F1\": 0.3684210526315789, \"Memory in Mb\": 1.3650827407836914, \"Time in s\": 1929.4936330000005 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5142857142857142, \"F1\": 0.4516129032258064, \"Memory in Mb\": 0.1929693222045898, \"Time in s\": 0.378114 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5402843601895735, \"F1\": 0.4756756756756757, \"Memory in Mb\": 
0.1935796737670898, \"Time in s\": 1.064058 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5394321766561514, \"F1\": 0.4930555555555555, \"Memory in Mb\": 0.1942129135131836, \"Time in s\": 1.944827 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5531914893617021, \"F1\": 0.4932975871313673, \"Memory in Mb\": 0.1941900253295898, \"Time in s\": 3.02079 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5614366729678639, \"F1\": 0.4703196347031963, \"Memory in Mb\": 0.1941900253295898, \"Time in s\": 4.293626 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5763779527559055, \"F1\": 0.4836852207293666, \"Memory in Mb\": 0.4277210235595703, \"Time in s\": 5.781232999999999 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5991902834008097, \"F1\": 0.4940374787052811, \"Memory in Mb\": 0.5387935638427734, \"Time in s\": 7.493556 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6210153482880756, \"F1\": 0.5201793721973094, \"Memory in Mb\": 0.6348705291748047, \"Time in s\": 9.45639 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6411332633788038, \"F1\": 0.5464190981432361, \"Memory in Mb\": 0.7018413543701172, \"Time in s\": 11.663967 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6515580736543909, \"F1\": 0.555956678700361, \"Memory in Mb\": 0.7448062896728516, \"Time in s\": 14.117771 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6626609442060086, \"F1\": 0.5732899022801302, \"Memory in Mb\": 0.8341083526611328, \"Time in s\": 16.817314 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6766325727773407, \"F1\": 0.5958702064896755, \"Memory in Mb\": 0.8756198883056641, \"Time in s\": 19.759644 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6877269426289034, \"F1\": 0.6062271062271062, \"Memory in Mb\": 0.961111068725586, \"Time in s\": 22.944053 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6999325691166555, \"F1\": 0.6238377007607777, \"Memory in Mb\": 1.0045452117919922, \"Time in s\": 26.370908 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7073631214600378, \"F1\": 0.6375681995323461, \"Memory in Mb\": 1.1097278594970703, \"Time in s\": 30.041027 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7162241887905605, \"F1\": 0.6496722505462491, \"Memory in Mb\": 1.175821304321289, \"Time in s\": 33.956331 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Leveraging 
Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7262631871182677, \"F1\": 0.6662153012863914, \"Memory in Mb\": 1.262613296508789, \"Time in s\": 38.117335 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7315154693235448, \"F1\": 0.6767676767676768, \"Memory in Mb\": 1.3344478607177734, \"Time in s\": 42.524713 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7386984600099354, \"F1\": 0.6894923258559622, \"Memory in Mb\": 1.391103744506836, \"Time in s\": 47.18564 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7451628126474752, \"F1\": 0.7013274336283186, \"Memory in Mb\": 1.475076675415039, \"Time in s\": 52.095729 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7501123595505618, \"F1\": 0.7073684210526315, \"Memory in Mb\": 1.496999740600586, \"Time in s\": 57.242367 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7550407550407551, \"F1\": 0.7143571785892947, \"Memory in Mb\": 1.516103744506836, \"Time in s\": 62.636095000000005 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7595404185473943, \"F1\": 0.7196172248803827, \"Memory in Mb\": 1.5630512237548828, \"Time in s\": 68.252973 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7620920173023987, \"F1\": 0.725124943207633, \"Memory in Mb\": 1.623823165893555, \"Time in s\": 74.089247 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7674594186485466, \"F1\": 0.7326388888888887, \"Memory in Mb\": 1.6640300750732422, \"Time in s\": 80.135514 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7727767695099819, \"F1\": 0.7391666666666666, \"Memory in Mb\": 1.7581462860107422, \"Time in s\": 86.389582 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.777001048584411, \"F1\": 0.7435691318327974, \"Memory in Mb\": 1.8173961639404297, \"Time in s\": 92.855234 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7809234917425009, \"F1\": 0.7474747474747475, \"Memory in Mb\": 1.919931411743164, \"Time in s\": 99.529297 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.784249918646274, \"F1\": 0.7521495327102804, \"Memory in Mb\": 2.025243759155273, \"Time in s\": 106.427437 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7889273356401384, \"F1\": 0.7567959405581733, \"Memory in Mb\": 2.059762954711914, \"Time in s\": 113.53746300000002 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7920852359208523, \"F1\": 0.7600983491394451, \"Memory in Mb\": 
2.1231555938720703, \"Time in s\": 120.860592 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7935712179298142, \"F1\": 0.7631935047361299, \"Memory in Mb\": 2.208223342895508, \"Time in s\": 128.39806900000002 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7963969116385473, \"F1\": 0.7653263019116677, \"Memory in Mb\": 2.2967967987060547, \"Time in s\": 136.14712200000002 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7993338884263114, \"F1\": 0.7677481529071635, \"Memory in Mb\": 2.342061996459961, \"Time in s\": 144.10985700000003 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8015637638177406, \"F1\": 0.7710018668326073, \"Memory in Mb\": 2.4194507598876958, \"Time in s\": 152.28979100000004 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8049803407601572, \"F1\": 0.7753623188405797, \"Memory in Mb\": 2.452432632446289, \"Time in s\": 160.68369700000002 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8066819688854884, \"F1\": 0.7769276044732195, \"Memory in Mb\": 2.484903335571289, \"Time in s\": 169.29472 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8080456915818227, \"F1\": 0.7781922525107604, \"Memory in Mb\": 2.555143356323242, \"Time in s\": 178.12388700000002 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8103072828453908, \"F1\": 0.7810055865921788, \"Memory in Mb\": 2.665616989135742, \"Time in s\": 187.169884 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8131634819532909, \"F1\": 0.7845484221980414, \"Memory in Mb\": 2.698610305786133, \"Time in s\": 196.43184 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8158803222094362, \"F1\": 0.7877984084880637, \"Memory in Mb\": 2.7711353302001958, \"Time in s\": 205.91274200000004 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8173444169849472, \"F1\": 0.78932365897901, \"Memory in Mb\": 2.792703628540039, \"Time in s\": 215.61058800000004 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8183015141540487, \"F1\": 0.7909090909090909, \"Memory in Mb\": 2.8151988983154297, \"Time in s\": 225.53181000000004 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8205018228608192, \"F1\": 0.7940959409594096, \"Memory in Mb\": 2.871114730834961, \"Time in s\": 235.67375100000004 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8209268190396309, \"F1\": 0.7941176470588236, \"Memory in Mb\": 2.9113216400146484, \"Time in s\": 
246.03360500000005 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.822974358974359, \"F1\": 0.7958362905133666, \"Memory in Mb\": 3.014688491821289, \"Time in s\": 256.6143220000001 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.825135514956836, \"F1\": 0.7989845372720977, \"Memory in Mb\": 3.062814712524414, \"Time in s\": 267.41563500000007 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.825437389424022, \"F1\": 0.7994579945799458, \"Memory in Mb\": 3.1556224822998047, \"Time in s\": 278.43394300000006 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8266897746967071, \"F1\": 0.800796812749004, \"Memory in Mb\": 3.260141372680664, \"Time in s\": 289.67931000000004 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8282694848084544, \"F1\": 0.8026030368763557, \"Memory in Mb\": 3.31306266784668, \"Time in s\": 301.15239700000006 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8883977900552487, \"F1\": 0.8861330326944759, \"Memory in Mb\": 2.6054086685180664, \"Time in s\": 3.368733 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9127553837658752, \"F1\": 0.8939597315436243, \"Memory in Mb\": 3.465878486633301, \"Time in s\": 9.891931 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9013617960986382, \"F1\": 0.8816254416961131, \"Memory in Mb\": 3.735013008117676, \"Time in s\": 19.447972 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9100193210046924, \"F1\": 0.8914780292942742, \"Memory in Mb\": 3.9933290481567374, \"Time in s\": 31.888989 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9125634797968648, \"F1\": 0.890728476821192, \"Memory in Mb\": 3.547215461730957, \"Time in s\": 47.051529 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.910027598896044, \"F1\": 0.8870408870408871, \"Memory in Mb\": 4.102688789367676, \"Time in s\": 65.23951100000001 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9101088156442202, \"F1\": 0.8873517786561265, \"Memory in Mb\": 4.165738105773926, \"Time in s\": 86.445256 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079619152752864, \"F1\": 0.8839798225778397, \"Memory in Mb\": 3.951657295227051, \"Time in s\": 110.708644 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9089905556236968, \"F1\": 0.8907216494845361, \"Memory in Mb\": 4.59532642364502, \"Time in s\": 137.95385399999998 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Leveraging 
Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9097030577326416, \"F1\": 0.8939040207522698, \"Memory in Mb\": 4.363041877746582, \"Time in s\": 168.15601999999998 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079779227295536, \"F1\": 0.893631829254147, \"Memory in Mb\": 4.951889991760254, \"Time in s\": 201.32076 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.90920798454604, \"F1\": 0.896399706098457, \"Memory in Mb\": 4.796502113342285, \"Time in s\": 237.348168 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9081260083213042, \"F1\": 0.8950533462657614, \"Memory in Mb\": 5.402684211730957, \"Time in s\": 276.331972 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9066466924229284, \"F1\": 0.8936972526485905, \"Memory in Mb\": 4.1159868240356445, \"Time in s\": 318.342965 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079402457870336, \"F1\": 0.896363184491757, \"Memory in Mb\": 5.441300392150879, \"Time in s\": 363.458518 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9079682649189376, \"F1\": 0.8968131188118812, \"Memory in Mb\": 6.5885820388793945, \"Time in s\": 411.598932 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.908252710862931, \"F1\": 0.8965062623599208, \"Memory in Mb\": 5.050324440002441, \"Time in s\": 462.698224 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9068498190960936, \"F1\": 0.8945797765285585, \"Memory in Mb\": 6.200932502746582, \"Time in s\": 516.871701 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064079474815546, \"F1\": 0.8923775803326874, \"Memory in Mb\": 6.66331958770752, \"Time in s\": 574.184961 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9061758375186268, \"F1\": 0.8919399949148233, \"Memory in Mb\": 6.39217472076416, \"Time in s\": 634.677368 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9064388961892248, \"F1\": 0.8910515362957523, \"Memory in Mb\": 7.244908332824707, \"Time in s\": 698.371295 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9069790778184738, \"F1\": 0.8925092764378478, \"Memory in Mb\": 8.84801197052002, \"Time in s\": 765.4898820000001 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9057445889523444, \"F1\": 0.8911670176216338, \"Memory in Mb\": 6.796334266662598, \"Time in s\": 836.163928 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9054408315319872, \"F1\": 0.8892837910608508, \"Memory in Mb\": 
8.134293556213379, \"Time in s\": 910.125477 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9049847675394056, \"F1\": 0.8878816296759404, \"Memory in Mb\": 6.271161079406738, \"Time in s\": 987.478572 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.903077902780726, \"F1\": 0.8852244733799206, \"Memory in Mb\": 4.8961381912231445, \"Time in s\": 1068.279683 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9014349372470464, \"F1\": 0.8823845065612956, \"Memory in Mb\": 4.712262153625488, \"Time in s\": 1152.538044 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8988055347498719, \"F1\": 0.8794439487155403, \"Memory in Mb\": 4.656708717346191, \"Time in s\": 1240.2539379999998 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8987934381304, \"F1\": 0.8792625891113836, \"Memory in Mb\": 4.200020790100098, \"Time in s\": 1331.2764919999995 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8987453548695684, \"F1\": 0.8798777826276736, \"Memory in Mb\": 3.921463966369629, \"Time in s\": 1425.5964879999997 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8965283959408937, \"F1\": 0.8766762858597862, \"Memory in Mb\": 3.647244453430176, \"Time in s\": 1523.2409069999997 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8964851160705046, \"F1\": 0.8761606074361409, \"Memory in Mb\": 3.9709863662719727, \"Time in s\": 1624.2218739999996 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8961434257617821, \"F1\": 0.8756059452746283, \"Memory in Mb\": 3.793328285217285, \"Time in s\": 1728.6222849999997 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8959516930169139, \"F1\": 0.8747704450435666, \"Memory in Mb\": 4.122292518615723, \"Time in s\": 1836.357538 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8945725188432306, \"F1\": 0.8729911477527449, \"Memory in Mb\": 3.9709787368774414, \"Time in s\": 1947.541881 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8943124329296336, \"F1\": 0.8729872139725119, \"Memory in Mb\": 4.574315071105957, \"Time in s\": 2061.9964669999995 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8941559022702187, \"F1\": 0.8730862784375448, \"Memory in Mb\": 5.331856727600098, \"Time in s\": 2179.80501 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8936299997095303, \"F1\": 0.8722973915469383, \"Memory in Mb\": 5.328598976135254, \"Time in s\": 2300.9518229999994 }, { \"step\": 35334, 
\"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.893612203888716, \"F1\": 0.8717196191516227, \"Memory in Mb\": 5.664120674133301, \"Time in s\": 2425.5167449999994 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8932641629184028, \"F1\": 0.8702709954386908, \"Memory in Mb\": 4.706181526184082, \"Time in s\": 2553.3715769999994 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.893067707632252, \"F1\": 0.8697875688434304, \"Memory in Mb\": 5.890534400939941, \"Time in s\": 2684.525425 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8926178024230638, \"F1\": 0.8686004630820685, \"Memory in Mb\": 5.698811531066895, \"Time in s\": 2819.0438189999995 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8924198475241933, \"F1\": 0.8686165710523841, \"Memory in Mb\": 5.71596622467041, \"Time in s\": 2956.962011 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8929583824599252, \"F1\": 0.8702763505913111, \"Memory in Mb\": 6.792008399963379, \"Time in s\": 3098.2064909999995 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8931541121930879, \"F1\": 0.8715801886792452, \"Memory in Mb\": 8.543190956115723, \"Time in s\": 3242.702748 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8934373125374925, \"F1\": 0.8727689442773243, \"Memory in Mb\": 7.9951677322387695, \"Time in s\": 3390.4303709999995 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8937554308259552, \"F1\": 0.8733411725180581, \"Memory in Mb\": 7.843605995178223, \"Time in s\": 3541.260541 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8933474371651298, \"F1\": 0.8727572016460905, \"Memory in Mb\": 8.17009449005127, \"Time in s\": 3695.381786 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8920550537246863, \"F1\": 0.8708216519301273, \"Memory in Mb\": 5.159916877746582, \"Time in s\": 3852.778305 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8923817302810216, \"F1\": 0.8714568226763348, \"Memory in Mb\": 4.89464282989502, \"Time in s\": 4013.28156 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.75, \"F1\": 0.75, \"Memory in Mb\": 0.6839132308959961, \"Time in s\": 0.224986 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8163265306122449, \"F1\": 0.8, \"Memory in Mb\": 0.6847753524780273, \"Time in s\": 0.690283 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8378378378378378, \"F1\": 
0.8333333333333334, \"Memory in Mb\": 0.6847753524780273, \"Time in s\": 1.396176 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8484848484848485, \"F1\": 0.8421052631578947, \"Memory in Mb\": 0.6699657440185547, \"Time in s\": 2.3378490000000003 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8467741935483871, \"F1\": 0.8403361344537815, \"Memory in Mb\": 0.9459667205810548, \"Time in s\": 3.5085690000000005 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8456375838926175, \"F1\": 0.8456375838926175, \"Memory in Mb\": 0.9459896087646484, \"Time in s\": 4.905419 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.867816091954023, \"F1\": 0.8588957055214724, \"Memory in Mb\": 1.1184329986572266, \"Time in s\": 6.555308 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8693467336683417, \"F1\": 0.8617021276595744, \"Memory in Mb\": 1.313650131225586, \"Time in s\": 8.437938 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8660714285714286, \"F1\": 0.8557692307692308, \"Memory in Mb\": 1.3411617279052734, \"Time in s\": 10.542565 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8554216867469879, \"F1\": 0.8434782608695653, \"Memory in Mb\": 1.341230392456055, \"Time in s\": 12.865947 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8576642335766423, \"F1\": 0.844621513944223, \"Memory in Mb\": 1.278768539428711, \"Time in s\": 15.438039 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.862876254180602, \"F1\": 0.8464419475655431, \"Memory in Mb\": 1.497152328491211, \"Time in s\": 18.241575 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8703703703703703, \"F1\": 0.851063829787234, \"Memory in Mb\": 1.5338878631591797, \"Time in s\": 21.265574 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8710601719197708, \"F1\": 0.8494983277591974, \"Memory in Mb\": 1.6005840301513672, \"Time in s\": 24.507018 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8716577540106952, \"F1\": 0.8481012658227849, \"Memory in Mb\": 1.880643844604492, \"Time in s\": 27.994389 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8696741854636592, \"F1\": 0.8433734939759037, \"Memory in Mb\": 2.144338607788086, \"Time in s\": 31.712731 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8702830188679245, \"F1\": 0.8405797101449276, \"Memory in Mb\": 2.182210922241211, \"Time in s\": 35.657971 }, { \"step\": 450, \"track\": 
\"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8752783964365256, \"F1\": 0.845303867403315, \"Memory in Mb\": 2.182027816772461, \"Time in s\": 39.813046 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8776371308016878, \"F1\": 0.8505154639175259, \"Memory in Mb\": 2.2095394134521484, \"Time in s\": 44.205113 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875751503006012, \"F1\": 0.8502415458937198, \"Memory in Mb\": 2.2298946380615234, \"Time in s\": 48.820324 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8778625954198473, \"F1\": 0.8497652582159624, \"Memory in Mb\": 2.294797897338867, \"Time in s\": 53.667583 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8743169398907104, \"F1\": 0.8463251670378619, \"Memory in Mb\": 2.4045467376708984, \"Time in s\": 58.748532 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8763066202090593, \"F1\": 0.8479657387580299, \"Memory in Mb\": 2.459569931030273, \"Time in s\": 64.055891 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8764607679465777, \"F1\": 0.8451882845188285, \"Memory in Mb\": 2.459569931030273, \"Time in s\": 69.582297 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8782051282051282, \"F1\": 0.8442622950819672, \"Memory in Mb\": 2.459615707397461, \"Time in s\": 75.26312 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8813559322033898, \"F1\": 0.850485436893204, \"Memory in Mb\": 2.390268325805664, \"Time in s\": 81.075862 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798219584569733, \"F1\": 0.8513761467889909, \"Memory in Mb\": 2.665342330932617, \"Time in s\": 87.004325 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8841201716738197, \"F1\": 0.8550983899821109, \"Memory in Mb\": 2.685148239135742, \"Time in s\": 93.043775 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8556876061120544, \"Memory in Mb\": 2.988882064819336, \"Time in s\": 99.203547 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8838451268357811, \"F1\": 0.8576104746317513, \"Memory in Mb\": 3.061452865600586, \"Time in s\": 105.474091 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8850129198966409, \"F1\": 0.8585055643879173, \"Memory in Mb\": 3.171110153198242, \"Time in s\": 111.86121 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8848560700876095, \"F1\": 
0.8601823708206686, \"Memory in Mb\": 3.2535533905029297, \"Time in s\": 118.359722 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8822815533980582, \"F1\": 0.8583941605839417, \"Memory in Mb\": 3.308439254760742, \"Time in s\": 124.969526 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8845700824499411, \"F1\": 0.8607954545454546, \"Memory in Mb\": 3.3186397552490234, \"Time in s\": 131.697623 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88558352402746, \"F1\": 0.8611111111111112, \"Memory in Mb\": 3.2835521697998047, \"Time in s\": 138.541741 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8843159065628476, \"F1\": 0.859078590785908, \"Memory in Mb\": 3.3748836517333984, \"Time in s\": 145.497394 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8852813852813853, \"F1\": 0.8612565445026178, \"Memory in Mb\": 3.4775447845458984, \"Time in s\": 152.56592600000002 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8861959957850368, \"F1\": 0.864321608040201, \"Memory in Mb\": 3.507909774780273, \"Time in s\": 159.751347 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8880903490759754, \"F1\": 0.8665850673194614, \"Memory in Mb\": 3.56281852722168, \"Time in s\": 167.044698 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8888888888888888, \"F1\": 0.867699642431466, \"Memory in Mb\": 3.645017623901367, \"Time in s\": 174.44997500000002 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888671875, \"F1\": 0.8680555555555557, \"Memory in Mb\": 3.708791732788086, \"Time in s\": 181.96372800000003 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8903717826501429, \"F1\": 0.8706411698537682, \"Memory in Mb\": 3.783597946166992, \"Time in s\": 189.590748 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8910614525139665, \"F1\": 0.8724100327153763, \"Memory in Mb\": 3.893186569213867, \"Time in s\": 197.32835100000003 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8926296633303002, \"F1\": 0.8744680851063831, \"Memory in Mb\": 3.893209457397461, \"Time in s\": 205.17488800000004 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.891459074733096, \"F1\": 0.8742268041237113, \"Memory in Mb\": 3.893209457397461, \"Time in s\": 213.13117300000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929503916449086, \"F1\": 0.8758829465186679, \"Memory in Mb\": 3.893255233764648, \"Time in s\": 
221.19416900000004 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8918228279386712, \"F1\": 0.874381800197824, \"Memory in Mb\": 3.8934383392333984, \"Time in s\": 229.37121900000005 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8932443703085905, \"F1\": 0.8757281553398059, \"Memory in Mb\": 3.867467880249024, \"Time in s\": 237.66201400000008 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8946078431372549, \"F1\": 0.8772597526165558, \"Memory in Mb\": 3.903593063354492, \"Time in s\": 246.05749800000007 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8951160928742994, \"F1\": 0.8783658310120707, \"Memory in Mb\": 3.932668685913086, \"Time in s\": 254.56059300000007 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1741485595703125, \"Time in s\": 4.196349 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1747589111328125, \"Time in s\": 11.133978 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 20.515058 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 32.334394 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1753692626953125, \"Time in s\": 46.585565 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1759796142578125, \"Time in s\": 63.272850000000005 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.1759796142578125, \"Time in s\": 82.384207 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.4890699386596679, \"Time in s\": 104.078316 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496554945696, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4890470504760742, \"Time in s\": 128.646042 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999684691786221, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4890470504760742, \"Time in s\": 156.063076 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133575386968, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 186.340267 }, { \"step\": 22836, \"track\": \"Binary classification\", 
\"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737245456536, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 219.481443 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574581615328, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.4896574020385742, \"Time in s\": 255.483814 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997747832288578, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 294.351753 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999789798206278, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 336.077392 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029362498768, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 380.664356 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998145285935084, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 428.135176 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248328613552, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4897031784057617, \"Time in s\": 478.442458 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998340524394292, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.4984426498413086, \"Time in s\": 531.6199799999999 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423500354712, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5112142562866211, \"Time in s\": 587.6570539999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498573644964, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5112142562866211, \"Time in s\": 646.546353 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566821927624, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 708.303189 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998629135441418, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 772.911335 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998686256048696, \"F1\": 0.7272727272727273, \"Memory in Mb\": 0.5118017196655273, \"Time in s\": 840.389935 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998528608063229, \"F1\": 0.6956521739130435, \"Memory in Mb\": 0.6032171249389648, \"Time in s\": 910.729159 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99985852012046, \"F1\": 0.6956521739130435, \"Memory 
in Mb\": 0.6032171249389648, \"Time in s\": 983.924468 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248345659788, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6488561630249023, \"Time in s\": 1059.968701 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999831090591746, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6488561630249023, \"Time in s\": 1138.856016 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999836915159642, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6487874984741211, \"Time in s\": 1220.600051 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997197358510396, \"F1\": 0.5789473684210525, \"Memory in Mb\": 0.915858268737793, \"Time in s\": 1305.292865 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118253322484, \"F1\": 0.5641025641025641, \"Memory in Mb\": 0.9158124923706056, \"Time in s\": 1392.9564999999998 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208309385008, \"F1\": 0.5641025641025641, \"Memory in Mb\": 0.9158353805541992, \"Time in s\": 1483.5784209999997 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996974425937132, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681943893432615, \"Time in s\": 1577.1896599999998 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063414784934, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681486129760742, \"Time in s\": 1673.7731769999998 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999714731847937, \"F1\": 0.5365853658536585, \"Memory in Mb\": 1.0681257247924805, \"Time in s\": 1773.3049339999998 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997226560789408, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0767507553100586, \"Time in s\": 1875.807102 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997301519670502, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0766592025756836, \"Time in s\": 1981.276839 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372533292768, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.0766363143920898, \"Time in s\": 2089.690205 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997439905141748, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2201.064573 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997503908354024, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1835393905639648, \"Time in s\": 2315.37923 }, { \"step\": 78023, \"track\": 
\"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756478941837, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2432.635431 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762277134814, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1834936141967771, \"Time in s\": 2552.8498939999995 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997678056411008, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.183516502380371, \"Time in s\": 2676.010932 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997730828486464, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.184126853942871, \"Time in s\": 2802.105151 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997781255108952, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1841497421264648, \"Time in s\": 2931.145124 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997829489244549, \"F1\": 0.5777777777777777, \"Memory in Mb\": 1.1841497421264648, \"Time in s\": 3063.115766 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999765205724508, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.323287010192871, \"Time in s\": 3198.03692 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997700973254656, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.3272314071655271, \"Time in s\": 3335.914266 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997747892671, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.3272314071655271, \"Time in s\": 3476.72485 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997792935290964, \"F1\": 0.5531914893617021, \"Memory in Mb\": 1.327254295349121, \"Time in s\": 3620.355738 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.6, \"F1\": 0.5434782608695652, \"Memory in Mb\": 0.7756900787353516, \"Time in s\": 0.335809 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7251184834123223, \"F1\": 0.6881720430107526, \"Memory in Mb\": 1.166391372680664, \"Time in s\": 1.061098 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7539432176656151, \"F1\": 0.7253521126760563, \"Memory in Mb\": 1.6065692901611328, \"Time in s\": 2.189762 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7825059101654847, \"F1\": 0.7566137566137565, \"Memory in Mb\": 2.022294044494629, \"Time in s\": 3.739465 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7958412098298677, \"F1\": 0.7631578947368421, \"Memory in Mb\": 2.4158077239990234, \"Time in s\": 
5.557825 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7937007874015748, \"F1\": 0.7622504537205083, \"Memory in Mb\": 2.785458564758301, \"Time in s\": 7.636877 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8029689608636977, \"F1\": 0.7675159235668789, \"Memory in Mb\": 3.1855859756469727, \"Time in s\": 9.989946 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8110979929161747, \"F1\": 0.7790055248618785, \"Memory in Mb\": 3.637519836425781, \"Time in s\": 12.613619 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8163693599160545, \"F1\": 0.7836835599505564, \"Memory in Mb\": 3.940545082092285, \"Time in s\": 15.512474 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8243626062322946, \"F1\": 0.7905405405405406, \"Memory in Mb\": 4.2790374755859375, \"Time in s\": 18.684266 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8240343347639485, \"F1\": 0.790602655771195, \"Memory in Mb\": 4.743890762329102, \"Time in s\": 22.147393 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8253343823760818, \"F1\": 0.7936802973977696, \"Memory in Mb\": 5.073901176452637, \"Time in s\": 25.9097 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8271604938271605, \"F1\": 0.7941176470588235, \"Memory in Mb\": 5.443793296813965, \"Time in s\": 29.969434 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8293998651382333, \"F1\": 0.7967871485943775, \"Memory in Mb\": 5.808208465576172, \"Time in s\": 34.339834 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8313404657016992, \"F1\": 0.7994011976047903, \"Memory in Mb\": 6.233636856079102, \"Time in s\": 39.030108 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8348082595870207, \"F1\": 0.8030942334739802, \"Memory in Mb\": 6.672585487365723, \"Time in s\": 44.033544 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8384230982787341, \"F1\": 0.8094302554027505, \"Memory in Mb\": 7.1109819412231445, \"Time in s\": 49.366899 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8363922391190352, \"F1\": 0.8095238095238096, \"Memory in Mb\": 7.617318153381348, \"Time in s\": 55.036325 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8335817188276204, \"F1\": 0.8080229226361031, \"Memory in Mb\": 8.179092407226562, \"Time in s\": 61.04992 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8357715903728173, \"F1\": 0.8127018299246501, \"Memory in Mb\": 8.54446792602539, \"Time in s\": 67.397947 }, { \"step\": 2226, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8368539325842697, \"F1\": 0.8143222506393862, \"Memory in Mb\": 9.00230598449707, \"Time in s\": 74.092607 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8365508365508365, \"F1\": 0.8142369575816674, \"Memory in Mb\": 9.503581047058104, \"Time in s\": 81.1143 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8379154698399671, \"F1\": 0.8160223567768979, \"Memory in Mb\": 9.875147819519045, \"Time in s\": 88.456785 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8375933936295714, \"F1\": 0.816525988449578, \"Memory in Mb\": 10.285362243652344, \"Time in s\": 96.129153 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8369195922989807, \"F1\": 0.8161702127659575, \"Memory in Mb\": 10.684521675109863, \"Time in s\": 104.109815 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8381125226860254, \"F1\": 0.8176614881439086, \"Memory in Mb\": 11.088122367858888, \"Time in s\": 112.408624 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8392170569730864, \"F1\": 0.8184688239936858, \"Memory in Mb\": 11.42569065093994, \"Time in s\": 121.031545 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.840242669362993, \"F1\": 0.8188073394495413, \"Memory in Mb\": 11.86772918701172, \"Time in s\": 129.980691 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8395704523267166, \"F1\": 0.8186833394630378, \"Memory in Mb\": 12.388784408569336, \"Time in s\": 139.271806 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8417741428122051, \"F1\": 0.8205494113449874, \"Memory in Mb\": 12.724870681762695, \"Time in s\": 148.909629 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8423135464231355, \"F1\": 0.8207612456747405, \"Memory in Mb\": 13.150970458984377, \"Time in s\": 158.896732 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8428192273665586, \"F1\": 0.8221554888221555, \"Memory in Mb\": 13.55066967010498, \"Time in s\": 169.223096 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8441521303974836, \"F1\": 0.8228794280142996, \"Memory in Mb\": 13.954619407653809, \"Time in s\": 179.903449 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8454066056064391, \"F1\": 0.8234548335974643, \"Memory in Mb\": 14.297491073608398, \"Time in s\": 190.93427 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.845510919385279, \"F1\": 0.8241791960724149, \"Memory in Mb\": 14.779010772705078, \"Time in s\": 202.327208 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": 
\"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8453473132372215, \"F1\": 0.8241954707985697, \"Memory in Mb\": 15.263079643249512, \"Time in s\": 214.078215 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8454475899005356, \"F1\": 0.8239395700174318, \"Memory in Mb\": 15.65628433227539, \"Time in s\": 226.208072 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8435559970201142, \"F1\": 0.8216308040770102, \"Memory in Mb\": 16.25907039642334, \"Time in s\": 238.717005 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8439390273409146, \"F1\": 0.8220689655172413, \"Memory in Mb\": 16.753341674804688, \"Time in s\": 251.598926 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8461901391837697, \"F1\": 0.8249194414607949, \"Memory in Mb\": 17.15353012084961, \"Time in s\": 264.978021 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8474108170310702, \"F1\": 0.8263033796175006, \"Memory in Mb\": 17.548202514648438, \"Time in s\": 278.744972 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8467760053920468, \"F1\": 0.8253968253968255, \"Memory in Mb\": 17.966373443603516, \"Time in s\": 292.921574 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.847267939433838, \"F1\": 0.8265204386839482, \"Memory in Mb\": 18.466463088989254, \"Time in s\": 307.49799799999994 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8475230538280077, \"F1\": 0.8273014330823415, \"Memory in Mb\": 18.88692092895508, \"Time in s\": 322.48837999999995 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8483958901237156, \"F1\": 0.828143570240076, \"Memory in Mb\": 19.283724784851078, \"Time in s\": 337.9195859999999 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8488205128205129, \"F1\": 0.8279243520896566, \"Memory in Mb\": 19.64915180206299, \"Time in s\": 353.77839399999993 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8496285886368199, \"F1\": 0.8291904218928163, \"Memory in Mb\": 19.982958793640137, \"Time in s\": 370.058467 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.850206408492235, \"F1\": 0.829682610639249, \"Memory in Mb\": 20.404964447021484, \"Time in s\": 386.77508599999993 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8501829385711535, \"F1\": 0.8296847635726795, \"Memory in Mb\": 20.84154510498047, \"Time in s\": 403.931326 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8503491224759389, \"F1\": 0.8299378082779326, \"Memory in Mb\": 21.28391456604004, \"Time in s\": 421.539661 }, { \"step\": 906, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082872928176796, \"F1\": 0.905788876276958, \"Memory in Mb\": 2.982789993286133, \"Time in s\": 3.516385 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9276642738818331, \"F1\": 0.910089224433768, \"Memory in Mb\": 5.035035133361816, \"Time in s\": 9.67052 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9193963930806036, \"F1\": 0.9004092769440655, \"Memory in Mb\": 7.41602611541748, \"Time in s\": 18.410774 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9213359094672924, \"F1\": 0.9030941856511392, \"Memory in Mb\": 8.590222358703613, \"Time in s\": 29.484026 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.919629057187017, \"F1\": 0.896825396825397, \"Memory in Mb\": 10.053829193115234, \"Time in s\": 42.932107 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9164673413063478, \"F1\": 0.8929245283018867, \"Memory in Mb\": 11.804065704345703, \"Time in s\": 58.899071000000006 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9165746727645482, \"F1\": 0.8945585010962727, \"Memory in Mb\": 14.095972061157228, \"Time in s\": 77.41200400000001 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136194287291292, \"F1\": 0.8907885554780182, \"Memory in Mb\": 16.2540225982666, \"Time in s\": 98.532638 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9135287624187416, \"F1\": 0.8957254843957995, \"Memory in Mb\": 18.652454376220703, \"Time in s\": 122.39659 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9157743680317916, \"F1\": 0.9006122183144457, \"Memory in Mb\": 20.98922061920166, \"Time in s\": 148.829731 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9175112895132964, \"F1\": 0.9043741275011632, \"Memory in Mb\": 21.67603302001953, \"Time in s\": 177.92592100000002 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9189586974519364, \"F1\": 0.9074093536521284, \"Memory in Mb\": 23.620224952697757, \"Time in s\": 209.60222600000003 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9187399167869577, \"F1\": 0.907222491517208, \"Memory in Mb\": 26.53412914276123, \"Time in s\": 243.98565400000004 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9176062445793582, \"F1\": 0.906084299451784, \"Memory in Mb\": 30.32783317565918, \"Time in s\": 281.19943900000004 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9178747516373538, \"F1\": 0.9076464746772592, \"Memory in Mb\": 30.98683452606201, \"Time in s\": 321.28235000000006 }, { \"step\": 14496, \"track\": \"Binary 
classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.917764746464298, \"F1\": 0.9080388828884433, \"Memory in Mb\": 33.47672271728516, \"Time in s\": 364.2111060000001 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9181221998571522, \"F1\": 0.9079091506609216, \"Memory in Mb\": 36.0946741104126, \"Time in s\": 409.94528800000006 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9156803826577544, \"F1\": 0.90483770503149, \"Memory in Mb\": 39.82530307769776, \"Time in s\": 458.76832800000005 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136118050310812, \"F1\": 0.9010184383944618, \"Memory in Mb\": 31.60810947418213, \"Time in s\": 510.754243 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9136265798333242, \"F1\": 0.9008803597441256, \"Memory in Mb\": 29.540003776550293, \"Time in s\": 565.7272700000001 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9144809461235216, \"F1\": 0.9008350094471872, \"Memory in Mb\": 30.251863479614254, \"Time in s\": 623.5764490000001 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9137022728413025, \"F1\": 0.9008188213585516, \"Memory in Mb\": 34.67849349975586, \"Time in s\": 684.5888420000001 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9112156260498152, \"F1\": 0.8982286280118825, \"Memory in Mb\": 25.08942031860352, \"Time in s\": 748.7394940000001 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9108678655199376, \"F1\": 0.8963414634146342, \"Memory in Mb\": 28.645745277404785, \"Time in s\": 815.8674180000002 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.910062254404168, \"F1\": 0.8947232415111892, \"Memory in Mb\": 32.00656318664551, \"Time in s\": 886.1270340000001 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9082148163871788, \"F1\": 0.8923306772908367, \"Memory in Mb\": 29.995322227478027, \"Time in s\": 959.605197 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9068312824496136, \"F1\": 0.8901633813677768, \"Memory in Mb\": 31.142220497131348, \"Time in s\": 1036.2892310000002 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.904876414238972, \"F1\": 0.8880745860197596, \"Memory in Mb\": 17.65717887878418, \"Time in s\": 1115.9640360000003 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9052639591976552, \"F1\": 0.8883004981375936, \"Memory in Mb\": 19.38848114013672, \"Time in s\": 1198.3281000000004 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.905331321976526, \"F1\": 0.8887639963685098, \"Memory in Mb\": 20.18451499938965, \"Time in s\": 
1283.4271300000005 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9040769093822324, \"F1\": 0.886768661735037, \"Memory in Mb\": 24.22576713562012, \"Time in s\": 1371.4128720000006 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9030388741333516, \"F1\": 0.8849883392659875, \"Memory in Mb\": 26.1504430770874, \"Time in s\": 1462.6499510000006 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9022644412482858, \"F1\": 0.8837708830548926, \"Memory in Mb\": 31.19912052154541, \"Time in s\": 1557.0334400000006 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9016004934584294, \"F1\": 0.8822684016313848, \"Memory in Mb\": 35.062607765197754, \"Time in s\": 1654.6421970000006 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.9002491406225361, \"F1\": 0.8805739097602416, \"Memory in Mb\": 34.509202003479004, \"Time in s\": 1755.5826140000006 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8993407941131382, \"F1\": 0.8797127468581688, \"Memory in Mb\": 39.23386001586914, \"Time in s\": 1859.7925240000009 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8986307091077235, \"F1\": 0.879040296169728, \"Memory in Mb\": 43.53414344787598, \"Time in s\": 1967.3677710000009 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8976384814244633, \"F1\": 0.8779440288168467, \"Memory in Mb\": 46.41888236999512, \"Time in s\": 2078.506307000001 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8964990235756941, \"F1\": 0.8759119134063996, \"Memory in Mb\": 47.29628944396973, \"Time in s\": 2193.0138890000007 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8963271613455117, \"F1\": 0.874812568724801, \"Memory in Mb\": 50.92376518249512, \"Time in s\": 2310.752343000001 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8956252523892853, \"F1\": 0.8736433855881107, \"Memory in Mb\": 52.94390201568604, \"Time in s\": 2431.8466670000007 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8955612204672676, \"F1\": 0.8730756946662408, \"Memory in Mb\": 54.53785705566406, \"Time in s\": 2556.3609540000007 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.895320481556588, \"F1\": 0.8731176104542625, \"Memory in Mb\": 58.273138999938965, \"Time in s\": 2684.2112230000007 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8957178335800116, \"F1\": 0.8745056603773586, \"Memory in Mb\": 61.72060203552246, \"Time in s\": 2815.6197070000007 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8957541269101523, \"F1\": 
0.8755198875285573, \"Memory in Mb\": 62.47746276855469, \"Time in s\": 2950.394142000001 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962447510497901, \"F1\": 0.8769003017707682, \"Memory in Mb\": 58.14933300018311, \"Time in s\": 3088.5413130000006 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967614663817195, \"F1\": 0.8777598576274956, \"Memory in Mb\": 60.56365966796875, \"Time in s\": 3230.0396030000006 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.896612780831053, \"F1\": 0.8775932480261367, \"Memory in Mb\": 59.69151401519776, \"Time in s\": 3375.0306610000007 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8962223774018426, \"F1\": 0.8767423816785724, \"Memory in Mb\": 64.22966861724854, \"Time in s\": 3523.5486860000005 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8967968387823131, \"F1\": 0.8776210046857412, \"Memory in Mb\": 42.88050365447998, \"Time in s\": 3675.4418880000007 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6666666666666666, \"F1\": 0.7142857142857143, \"Memory in Mb\": 0.5762147903442383, \"Time in s\": 0.119361 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7551020408163265, \"F1\": 0.7391304347826088, \"Memory in Mb\": 0.6475057601928711, \"Time in s\": 0.366938 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.7777777777777778, \"Memory in Mb\": 0.9396762847900392, \"Time in s\": 0.745521 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.7999999999999999, \"Memory in Mb\": 1.1059551239013672, \"Time in s\": 1.266772 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8067226890756303, \"Memory in Mb\": 1.2883186340332031, \"Time in s\": 1.944005 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.8187919463087249, \"Memory in Mb\": 1.3393936157226562, \"Time in s\": 2.776317 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8292682926829268, \"Memory in Mb\": 1.354720115661621, \"Time in s\": 3.764387 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8391959798994975, \"F1\": 0.8297872340425532, \"Memory in Mb\": 1.492502212524414, \"Time in s\": 4.90761 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.84375, \"F1\": 0.8309178743961353, \"Memory in Mb\": 1.6093759536743164, \"Time in s\": 6.207708 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8353413654618473, \"F1\": 
0.8225108225108225, \"Memory in Mb\": 1.7083539962768557, \"Time in s\": 7.674896 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394160583941606, \"F1\": 0.8253968253968254, \"Memory in Mb\": 1.7150201797485352, \"Time in s\": 9.301078 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.842809364548495, \"F1\": 0.825278810408922, \"Memory in Mb\": 1.782989501953125, \"Time in s\": 11.084076 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8518518518518519, \"F1\": 0.8309859154929577, \"Memory in Mb\": 1.8577651977539065, \"Time in s\": 13.027607 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8567335243553008, \"F1\": 0.8344370860927152, \"Memory in Mb\": 1.8672637939453125, \"Time in s\": 15.126796 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8286604361370716, \"Memory in Mb\": 2.0530452728271484, \"Time in s\": 17.394419 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8284023668639053, \"Memory in Mb\": 2.076310157775879, \"Time in s\": 19.823605 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8584905660377359, \"F1\": 0.8295454545454545, \"Memory in Mb\": 2.0878963470458984, \"Time in s\": 22.419713 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8292682926829269, \"Memory in Mb\": 2.0654611587524414, \"Time in s\": 25.175931 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8649789029535865, \"F1\": 0.8383838383838383, \"Memory in Mb\": 2.202821731567383, \"Time in s\": 28.096353 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8677354709418837, \"F1\": 0.8443396226415094, \"Memory in Mb\": 2.377251625061035, \"Time in s\": 31.184831 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8702290076335878, \"F1\": 0.8447488584474886, \"Memory in Mb\": 2.37432861328125, \"Time in s\": 34.435066 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8706739526411658, \"F1\": 0.8466522678185745, \"Memory in Mb\": 2.44970703125, \"Time in s\": 37.85251 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.872822299651568, \"F1\": 0.8488612836438924, \"Memory in Mb\": 2.5103416442871094, \"Time in s\": 41.431292 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8764607679465777, \"F1\": 0.8508064516129032, \"Memory in Mb\": 2.478057861328125, \"Time in s\": 45.177182 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.875, \"F1\": 0.8464566929133858, \"Memory in Mb\": 2.529691696166992, 
\"Time in s\": 49.08745 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8782742681047766, \"F1\": 0.8528864059590316, \"Memory in Mb\": 2.6017770767211914, \"Time in s\": 53.16255700000001 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8783382789317508, \"F1\": 0.856140350877193, \"Memory in Mb\": 2.631270408630371, \"Time in s\": 57.40937600000001 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882689556509299, \"F1\": 0.8595890410958904, \"Memory in Mb\": 2.6406030654907227, \"Time in s\": 61.81678500000001 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8825966850828729, \"F1\": 0.8617886178861789, \"Memory in Mb\": 2.701584815979004, \"Time in s\": 66.39355 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8851802403204272, \"F1\": 0.8652037617554857, \"Memory in Mb\": 2.789071083068848, \"Time in s\": 71.086949 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8863049095607235, \"F1\": 0.8658536585365854, \"Memory in Mb\": 2.950723648071289, \"Time in s\": 75.87878400000001 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.886107634543179, \"F1\": 0.8671532846715327, \"Memory in Mb\": 2.9481277465820312, \"Time in s\": 80.77073000000001 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8871359223300971, \"F1\": 0.869198312236287, \"Memory in Mb\": 3.217336654663086, \"Time in s\": 85.76636800000001 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8881036513545347, \"F1\": 0.8696844993141291, \"Memory in Mb\": 3.2494144439697266, \"Time in s\": 90.85872600000002 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901601830663616, \"F1\": 0.8713136729222519, \"Memory in Mb\": 3.264657974243164, \"Time in s\": 96.047686 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8887652947719689, \"F1\": 0.8694516971279374, \"Memory in Mb\": 3.388858795166016, \"Time in s\": 101.334025 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8906926406926406, \"F1\": 0.8729559748427673, \"Memory in Mb\": 3.3625974655151367, \"Time in s\": 106.715104 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8914646996838778, \"F1\": 0.875453446191052, \"Memory in Mb\": 3.5129919052124023, \"Time in s\": 112.201489 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.893223819301848, \"F1\": 0.8773584905660378, \"Memory in Mb\": 3.552186965942383, \"Time in s\": 117.78607 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8928928928928929, \"F1\": 0.8771526980482205, \"Memory in Mb\": 
3.671197891235352, \"Time in s\": 123.468781 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.892578125, \"F1\": 0.8772321428571428, \"Memory in Mb\": 3.734159469604492, \"Time in s\": 129.25173700000002 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.894184938036225, \"F1\": 0.8794788273615636, \"Memory in Mb\": 3.775693893432617, \"Time in s\": 135.127992 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8929236499068901, \"F1\": 0.879074658254469, \"Memory in Mb\": 3.8186750411987305, \"Time in s\": 141.101233 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8944494995450409, \"F1\": 0.8809034907597535, \"Memory in Mb\": 3.859647750854492, \"Time in s\": 147.172282 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8959074733096085, \"F1\": 0.8835820895522387, \"Memory in Mb\": 3.8923940658569336, \"Time in s\": 153.346878 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896431679721497, \"F1\": 0.8839024390243903, \"Memory in Mb\": 4.0131940841674805, \"Time in s\": 159.622141 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8952299829642248, \"F1\": 0.8822966507177035, \"Memory in Mb\": 4.097073554992676, \"Time in s\": 165.998758 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.896580483736447, \"F1\": 0.8834586466165414, \"Memory in Mb\": 4.154815673828125, \"Time in s\": 172.479929 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8978758169934641, \"F1\": 0.8847926267281105, \"Memory in Mb\": 4.238761901855469, \"Time in s\": 179.059934 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"Phishing\", \"Accuracy\": 0.899119295436349, \"F1\": 0.8866906474820143, \"Memory in Mb\": 4.319509506225586, \"Time in s\": 185.738258 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2471084594726562, \"Time in s\": 2.767854 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2488327026367187, \"Time in s\": 8.302014 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2500534057617187, \"Time in s\": 16.382949 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2495498657226562, \"Time in s\": 26.80983 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2495498657226562, \"Time in s\": 39.580928 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, 
\"Memory in Mb\": 0.2512741088867187, \"Time in s\": 54.705622 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.2512741088867187, \"Time in s\": 72.18382 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996058595546212, \"F1\": 0.625, \"Memory in Mb\": 0.6267004013061523, \"Time in s\": 92.151846 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996496554945696, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6147451400756836, \"Time in s\": 114.903201 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999684691786221, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6187658309936523, \"Time in s\": 140.427577 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997133575386968, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6200857162475586, \"Time in s\": 168.711887 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737245456536, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.6196355819702148, \"Time in s\": 199.762171 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997574581615328, \"F1\": 0.7000000000000001, \"Memory in Mb\": 0.619715690612793, \"Time in s\": 233.587883 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999737247100334, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.656519889831543, \"Time in s\": 270.202649 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999754764573991, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6566305160522461, \"Time in s\": 309.590328 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999770092291523, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6724729537963867, \"Time in s\": 351.755182 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997836166924264, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6681547164916992, \"Time in s\": 396.69715 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997956383382476, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.669642448425293, \"Time in s\": 444.422506 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998063945126672, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6692113876342773, \"Time in s\": 494.922369 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998160750413831, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6613035202026367, \"Time in s\": 548.2094599999999 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248335919124, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6618070602416992, \"Time in s\": 604.285405 }, { \"step\": 41866, 
\"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998327958915562, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6630735397338867, \"Time in s\": 663.137659 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998400658014988, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6625699996948242, \"Time in s\": 724.773781 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999846729872348, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.6625699996948242, \"Time in s\": 789.184255 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998528608063229, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7120122909545898, \"Time in s\": 856.394593 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99985852012046, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7120351791381836, \"Time in s\": 926.380828 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998442973919812, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.741633415222168, \"Time in s\": 999.155992 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998498583037742, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7417364120483398, \"Time in s\": 1074.696379 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998550356974596, \"F1\": 0.6666666666666666, \"Memory in Mb\": 0.7564992904663086, \"Time in s\": 1153.024047 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997022193417296, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0212621688842771, \"Time in s\": 1234.302477 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997118253322484, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0188016891479492, \"Time in s\": 1318.479479 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997208309385008, \"F1\": 0.5142857142857143, \"Memory in Mb\": 1.0275907516479492, \"Time in s\": 1405.549891 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996974425937132, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.2421979904174805, \"Time in s\": 1495.555588 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997063414784934, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.258589744567871, \"Time in s\": 1588.500286 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999714731847937, \"F1\": 0.4864864864864864, \"Memory in Mb\": 1.266993522644043, \"Time in s\": 1684.370287 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997226560789408, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3167448043823242, \"Time in s\": 1783.171155 }, { \"step\": 70411, \"track\": \"Binary classification\", 
\"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997301519670502, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3264188766479492, \"Time in s\": 1884.908958 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997372533292768, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3260221481323242, \"Time in s\": 1989.570925 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997439905141748, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.342616081237793, \"Time in s\": 2097.157749 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997503908354024, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3624944686889648, \"Time in s\": 2207.674598 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999756478941837, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.357996940612793, \"Time in s\": 2321.128083 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999762277134814, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3575201034545898, \"Time in s\": 2437.510157 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997678056411008, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3575468063354492, \"Time in s\": 2556.8217050000003 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997730828486464, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3463621139526367, \"Time in s\": 2679.067711 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997781255108952, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.3506765365600586, \"Time in s\": 2804.239644 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997829489244549, \"F1\": 0.5365853658536586, \"Memory in Mb\": 1.350123405456543, \"Time in s\": 2932.339248 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997763864042932, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.4778623580932615, \"Time in s\": 3063.373685 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781045071872, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.489375114440918, \"Time in s\": 3197.337821 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997855135877142, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.5083913803100586, \"Time in s\": 3334.235023 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Stacking\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997898033610444, \"F1\": 0.5238095238095238, \"Memory in Mb\": 1.5167646408081057, \"Time in s\": 3474.068956 }, { \"step\": 106, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7142857142857143, \"F1\": 0.6590909090909091, \"Memory in Mb\": 0.0813856124877929, \"Time in s\": 0.066609 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": 
\"Bananas\", \"Accuracy\": 0.7819905213270142, \"F1\": 0.7444444444444445, \"Memory in Mb\": 0.0819120407104492, \"Time in s\": 0.219203 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7949526813880127, \"F1\": 0.7583643122676579, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 0.458077 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.806146572104019, \"F1\": 0.7696629213483147, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 0.7833680000000001 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7977315689981096, \"F1\": 0.7446300715990454, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 1.194727 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.7984251968503937, \"F1\": 0.7460317460317459, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 1.6920620000000002 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.805668016194332, \"F1\": 0.75, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 2.274961 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8110979929161747, \"F1\": 0.7597597597597598, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 2.9435640000000003 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8174186778593914, \"F1\": 0.7661290322580646, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 3.697699 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8253068932955618, \"F1\": 0.774114774114774, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 4.5372580000000005 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8266094420600858, \"F1\": 0.7770419426048565, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 5.437299 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8284815106215578, \"F1\": 0.7811244979919679, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 6.385956 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8264342774146696, \"F1\": 0.7764265668849394, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 7.383082 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8287255563047876, \"F1\": 0.7795138888888888, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 8.428656 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.830081812460667, \"F1\": 0.7822580645161291, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 9.522527 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8348082595870207, \"F1\": 0.7881996974281392, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 10.665041 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8367573570238757, \"F1\": 
0.7929577464788733, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 11.856044 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8342947037231253, \"F1\": 0.7931937172774869, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 13.095519 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301043219076006, \"F1\": 0.7901840490797546, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 14.384085 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310523831996225, \"F1\": 0.7935409457900807, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 15.721188 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301123595505618, \"F1\": 0.792535675082327, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 17.109526000000002 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301158301158301, \"F1\": 0.792887029288703, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 18.546805000000003 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297086581862946, \"F1\": 0.7921882824236354, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 20.032509 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297286669288242, \"F1\": 0.7933174224343675, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 21.566602 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8289920724801813, \"F1\": 0.7932450935645824, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 23.149486 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8294010889292196, \"F1\": 0.7942206654991244, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 24.780488 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8304788535477106, \"F1\": 0.7949260042283298, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 26.460055 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8308055274688237, \"F1\": 0.7944307944307943, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 28.187982 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8285063455906281, \"F1\": 0.7924379677038204, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 29.964311 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8307643913180245, \"F1\": 0.7941851568477429, \"Memory in Mb\": 0.0815153121948242, \"Time in s\": 31.788844 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310502283105022, \"F1\": 0.7939101373932418, \"Memory in Mb\": 0.0820188522338867, \"Time in s\": 33.661829 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8307283987024476, \"F1\": 0.7948534667619728, \"Memory in Mb\": 
0.0820188522338867, \"Time in s\": 35.582957 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301401201029454, \"F1\": 0.7933194154488518, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 37.553729 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8320843741326672, \"F1\": 0.7952622673434856, \"Memory in Mb\": 0.097848892211914, \"Time in s\": 39.573419 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8306821245618765, \"F1\": 0.7939632545931758, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 41.642476 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8311926605504587, \"F1\": 0.794904458598726, \"Memory in Mb\": 0.0973453521728515, \"Time in s\": 43.760553 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.831165519000255, \"F1\": 0.7945375543140905, \"Memory in Mb\": 0.1073513031005859, \"Time in s\": 45.928839 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301465110504097, \"F1\": 0.7932285368802902, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 48.146786 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8296636825550447, \"F1\": 0.7929411764705883, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 50.413841 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8310922387355508, \"F1\": 0.7955454026270702, \"Memory in Mb\": 0.1135158538818359, \"Time in s\": 52.731009 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8317606444188723, \"F1\": 0.7965488449763429, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 55.097904 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8312738710402157, \"F1\": 0.7955349850258643, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 57.514886 }, { \"step\": 4558, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8294930875576036, \"F1\": 0.7937350676931244, \"Memory in Mb\": 0.1135158538818359, \"Time in s\": 59.981717 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8286510829937809, \"F1\": 0.7933798810447376, \"Memory in Mb\": 0.1130123138427734, \"Time in s\": 62.498268 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8280561962675613, \"F1\": 0.7922998986828774, \"Memory in Mb\": 0.1229228973388671, \"Time in s\": 65.06543599999999 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8274871794871795, \"F1\": 0.7908480477493159, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 67.68219599999999 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8285484842401124, \"F1\": 0.7928190198932558, \"Memory in Mb\": 0.1224651336669921, \"Time in s\": 
70.34849499999999 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8287792412030667, \"F1\": 0.7930624851508671, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 73.06406199999998 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8297708453687657, \"F1\": 0.7943229409027454, \"Memory in Mb\": 0.1229686737060546, \"Time in s\": 75.82921599999997 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Bananas\", \"Accuracy\": 0.8301566333270428, \"F1\": 0.7949886104783599, \"Memory in Mb\": 0.1224651336669921, \"Time in s\": 78.64422599999997 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8806629834254144, \"F1\": 0.8820960698689956, \"Memory in Mb\": 0.2990808486938476, \"Time in s\": 0.533765 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8901159580342353, \"F1\": 0.8654496281271129, \"Memory in Mb\": 0.3327569961547851, \"Time in s\": 1.6121 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8884799411115201, \"F1\": 0.8614540466392318, \"Memory in Mb\": 0.3573274612426758, \"Time in s\": 3.262924 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8948385316036434, \"F1\": 0.8697435897435898, \"Memory in Mb\": 0.3568239212036133, \"Time in s\": 5.47711 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8922499448001766, \"F1\": 0.8587145338737695, \"Memory in Mb\": 0.3567705154418945, \"Time in s\": 8.248427 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833486660533578, \"F1\": 0.846116504854369, \"Memory in Mb\": 0.3567705154418945, \"Time in s\": 11.585141 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8836145718340955, \"F1\": 0.8482730263157895, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 15.487845 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8802263005381538, \"F1\": 0.8418367346938775, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 19.950209 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8828652029927634, \"F1\": 0.8526461965746027, \"Memory in Mb\": 0.357274055480957, \"Time in s\": 24.978187 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8872944033557788, \"F1\": 0.8620456695041211, \"Memory in Mb\": 0.4238061904907226, \"Time in s\": 30.568141 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8887104867034621, \"F1\": 0.8668187822745287, \"Memory in Mb\": 0.4239206314086914, \"Time in s\": 36.726803 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8916383037439058, \"F1\": 0.8724003466204506, \"Memory in Mb\": 0.4239206314086914, \"Time in s\": 43.450649000000006 }, { \"step\": 11778, \"track\": \"Binary 
classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8908890209730831, \"F1\": 0.871229582122457, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 50.737672 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8903256327367343, \"F1\": 0.8711917770163904, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 58.59177 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8921186253587461, \"F1\": 0.8751702997275205, \"Memory in Mb\": 0.508366584777832, \"Time in s\": 67.02087300000001 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8925146602276647, \"F1\": 0.8765842839036756, \"Memory in Mb\": 0.5096101760864258, \"Time in s\": 76.01746500000002 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8924095837932602, \"F1\": 0.875628612174435, \"Memory in Mb\": 0.5091333389282227, \"Time in s\": 85.58361400000001 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8862451707855522, \"F1\": 0.8668437298112124, \"Memory in Mb\": 0.5348939895629883, \"Time in s\": 95.717046 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882646836693197, \"F1\": 0.8594880356149137, \"Memory in Mb\": 0.5348939895629883, \"Time in s\": 106.420525 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.882664606214471, \"F1\": 0.8596143687268886, \"Memory in Mb\": 0.5429277420043945, \"Time in s\": 117.69460800000002 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8833114323258869, \"F1\": 0.858634742740703, \"Memory in Mb\": 0.5442209243774414, \"Time in s\": 129.542011 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8807385479905675, \"F1\": 0.8562095457020145, \"Memory in Mb\": 0.6346635818481445, \"Time in s\": 141.970262 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8784853865719633, \"F1\": 0.8535739070090216, \"Memory in Mb\": 0.6924257278442383, \"Time in s\": 154.986156 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8788115715402658, \"F1\": 0.8517414055027289, \"Memory in Mb\": 0.7508554458618164, \"Time in s\": 168.584532 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8773014261115281, \"F1\": 0.8483988871310894, \"Memory in Mb\": 0.7274637222290039, \"Time in s\": 182.766068 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8728507747824241, \"F1\": 0.8417102690132656, \"Memory in Mb\": 0.7538461685180664, \"Time in s\": 197.535569 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8720820898573239, \"F1\": 0.8400224960376297, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 212.893707 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"Voting\", 
\"dataset\": \"Elec2\", \"Accuracy\": 0.8691607206212796, \"F1\": 0.8364944085915562, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 228.841108 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8692954744414417, \"F1\": 0.8360233024543979, \"Memory in Mb\": 0.7539834976196289, \"Time in s\": 245.378297 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8690901063320946, \"F1\": 0.8360217531569729, \"Memory in Mb\": 0.7534799575805664, \"Time in s\": 262.499248 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8664055545664946, \"F1\": 0.8316280739544067, \"Memory in Mb\": 0.7535486221313477, \"Time in s\": 280.20757899999995 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8639232865372012, \"F1\": 0.826859776168532, \"Memory in Mb\": 0.7540521621704102, \"Time in s\": 298.510007 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.86353145800582, \"F1\": 0.8261017816042963, \"Memory in Mb\": 0.8122949600219727, \"Time in s\": 317.403419 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8633899295523163, \"F1\": 0.8248272416951128, \"Memory in Mb\": 0.8122949600219727, \"Time in s\": 336.895126 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8610804503453279, \"F1\": 0.8213199204964914, \"Memory in Mb\": 0.8808259963989258, \"Time in s\": 356.985514 }, { \"step\": 32616, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8589299402115591, \"F1\": 0.8183648493940232, \"Memory in Mb\": 0.9061365127563475, \"Time in s\": 377.673556 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8585961039348469, \"F1\": 0.8181399631675875, \"Memory in Mb\": 0.9644479751586914, \"Time in s\": 398.957872 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8569436779272083, \"F1\": 0.8155499794015206, \"Memory in Mb\": 0.989060401916504, \"Time in s\": 420.833858 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8570741233407863, \"F1\": 0.8144610184436769, \"Memory in Mb\": 1.055558204650879, \"Time in s\": 443.304179 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8577223433317697, \"F1\": 0.8140239503679124, \"Memory in Mb\": 1.0826387405395508, \"Time in s\": 466.364607 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8573966886525778, \"F1\": 0.812978851110405, \"Memory in Mb\": 1.1978578567504885, \"Time in s\": 490.019084 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8572967858926178, \"F1\": 0.8125388386384037, \"Memory in Mb\": 1.198460578918457, \"Time in s\": 514.261258 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8578432630849399, 
\"F1\": 0.8141610738255034, \"Memory in Mb\": 1.198460578918457, \"Time in s\": 539.085152 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8585906730552141, \"F1\": 0.816998344317112, \"Memory in Mb\": 1.232090950012207, \"Time in s\": 564.495213 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8591822217861611, \"F1\": 0.8196298972635019, \"Memory in Mb\": 1.2325944900512695, \"Time in s\": 590.495221 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.859868026394721, \"F1\": 0.8219620754832023, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 617.0763880000001 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8602663159625185, \"F1\": 0.82283230109576, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 644.2465100000001 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8601651068135305, \"F1\": 0.8228301721877459, \"Memory in Mb\": 1.2570466995239258, \"Time in s\": 671.9994270000001 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8584912035681301, \"F1\": 0.8195968066165068, \"Memory in Mb\": 1.2565431594848633, \"Time in s\": 700.3413830000001 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8588710567562198, \"F1\": 0.8202547305086175, \"Memory in Mb\": 1.3147859573364258, \"Time in s\": 729.273758 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5833333333333334, \"F1\": 0.7058823529411764, \"Memory in Mb\": 0.1548337936401367, \"Time in s\": 0.027523 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7346938775510204, \"F1\": 0.7636363636363637, \"Memory in Mb\": 0.1710462570190429, \"Time in s\": 0.082708 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.7837837837837838, \"F1\": 0.8048780487804877, \"Memory in Mb\": 0.1877622604370117, \"Time in s\": 0.17051 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8080808080808081, \"F1\": 0.819047619047619, \"Memory in Mb\": 0.2039480209350586, \"Time in s\": 0.296263 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8145161290322581, \"F1\": 0.8217054263565893, \"Memory in Mb\": 0.2043638229370117, \"Time in s\": 0.463136 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8187919463087249, \"F1\": 0.830188679245283, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 0.670871 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8390804597701149, \"F1\": 0.8390804597701148, \"Memory in Mb\": 0.2043638229370117, \"Time in s\": 0.919601 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8391959798994975, \"F1\": 0.8383838383838383, \"Memory in Mb\": 
0.2048635482788086, \"Time in s\": 1.21153 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8348214285714286, \"F1\": 0.8294930875576038, \"Memory in Mb\": 0.2048902511596679, \"Time in s\": 1.544633 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8353413654618473, \"F1\": 0.8298755186721991, \"Memory in Mb\": 0.2043867111206054, \"Time in s\": 1.91889 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8357664233576643, \"F1\": 0.8288973384030419, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 2.334116 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8394648829431438, \"F1\": 0.8285714285714285, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 2.790538 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8487654320987654, \"F1\": 0.8338983050847458, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 3.287923 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8538681948424068, \"F1\": 0.8360128617363344, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 3.826274 }, { \"step\": 375, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8529411764705882, \"F1\": 0.8318042813455658, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 4.40584 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8546365914786967, \"F1\": 0.8313953488372093, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 5.028368 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8561320754716981, \"F1\": 0.8291316526610645, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 5.69196 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8596881959910914, \"F1\": 0.8310991957104559, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 6.39676 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8586497890295358, \"F1\": 0.8312342569269521, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 7.142548 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8597194388777555, \"F1\": 0.835680751173709, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 7.929462 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8606870229007634, \"F1\": 0.8337129840546698, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 8.757545 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8615664845173042, \"F1\": 0.8362068965517241, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 9.62628 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8641114982578397, \"F1\": 0.8388429752066116, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 10.535573 }, { \"step\": 600, \"track\": 
\"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8664440734557596, \"F1\": 0.8387096774193549, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 11.487542 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8669871794871795, \"F1\": 0.8362919132149902, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 12.480171000000002 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8705701078582434, \"F1\": 0.8432835820895523, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 13.513483000000004 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8724035608308606, \"F1\": 0.8485915492957745, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 14.588001000000002 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.876967095851216, \"F1\": 0.8522336769759451, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 15.703553000000005 }, { \"step\": 725, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8784530386740331, \"F1\": 0.8562091503267973, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 16.863107000000003 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785046728971962, \"F1\": 0.8571428571428572, \"Memory in Mb\": 0.2048673629760742, \"Time in s\": 18.064417 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785529715762274, \"F1\": 0.8567073170731707, \"Memory in Mb\": 0.2053709030151367, \"Time in s\": 19.306597000000004 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8785982478097623, \"F1\": 0.8583941605839417, \"Memory in Mb\": 0.1428241729736328, \"Time in s\": 20.591420000000003 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8786407766990292, \"F1\": 0.8595505617977528, \"Memory in Mb\": 0.2701892852783203, \"Time in s\": 21.920167000000003 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8798586572438163, \"F1\": 0.8602739726027396, \"Memory in Mb\": 0.2707157135009765, \"Time in s\": 23.289893000000003 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8832951945080092, \"F1\": 0.8636363636363635, \"Memory in Mb\": 0.270212173461914, \"Time in s\": 24.700235000000003 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.882091212458287, \"F1\": 0.8619791666666667, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 26.151721 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8841991341991342, \"F1\": 0.8657465495608533, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 27.644342 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8840885142255005, \"F1\": 0.8671497584541062, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 
29.178064000000003 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8860369609856262, \"F1\": 0.8692579505300354, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 30.753129 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8868868868868869, \"F1\": 0.870264064293915, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 32.369334 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.88671875, \"F1\": 0.8705357142857143, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 34.02633 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.888465204957102, \"F1\": 0.8729641693811074, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 35.724225000000004 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8873370577281192, \"F1\": 0.8727655099894847, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 37.462729 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8889899909008189, \"F1\": 0.8747433264887065, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 39.24172900000001 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8905693950177936, \"F1\": 0.8776119402985074, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 41.061817000000005 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8912097476066144, \"F1\": 0.8780487804878049, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 42.922291 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8901192504258943, \"F1\": 0.8765550239234451, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 44.822872 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8915763135946623, \"F1\": 0.8778195488721804, \"Memory in Mb\": 0.2702350616455078, \"Time in s\": 46.765498 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8913398692810458, \"F1\": 0.8774193548387096, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 48.748369 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"Phishing\", \"Accuracy\": 0.8903122497998399, \"F1\": 0.8769092542677449, \"Memory in Mb\": 0.2707386016845703, \"Time in s\": 50.77338400000001 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 1.484811 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 4.457186 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 8.746386000000001 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": 
\"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 13.887875 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0777397155761718, \"Time in s\": 19.865436000000003 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 26.640571 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0782432556152343, \"Time in s\": 34.167751 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998029297773108, \"F1\": 0.8421052631578948, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 42.434428 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248277472848, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 51.484422 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998423458931104, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1113691329956054, \"Time in s\": 61.31819 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998566787693484, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1113691329956054, \"Time in s\": 71.93436700000001 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999868622728268, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 83.32890400000001 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998787290807664, \"F1\": 0.8695652173913044, \"Memory in Mb\": 0.1108655929565429, \"Time in s\": 95.504963 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998873916144289, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 108.464893 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999894899103139, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 122.206969 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999014681249384, \"F1\": 0.88, \"Memory in Mb\": 0.1108884811401367, \"Time in s\": 136.730455 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999072642967544, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 152.048808 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999124164306776, \"F1\": 0.88, \"Memory in Mb\": 0.1113920211791992, \"Time in s\": 168.149805 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999170262197146, \"F1\": 0.88, \"Memory in Mb\": 0.1108884811401367, \"Time in s\": 185.035032 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 
0.9999211750177356, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 202.707805 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999924928682248, \"F1\": 0.88, \"Memory in Mb\": 0.1033926010131836, \"Time in s\": 221.166297 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999283410963812, \"F1\": 0.88, \"Memory in Mb\": 0.1033926010131836, \"Time in s\": 240.410098 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999931456772071, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 260.439908 }, { \"step\": 45672, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999343128024348, \"F1\": 0.88, \"Memory in Mb\": 0.1028890609741211, \"Time in s\": 281.255775 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999369403455668, \"F1\": 0.88, \"Memory in Mb\": 0.1155691146850586, \"Time in s\": 302.85620600000004 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999393657659116, \"F1\": 0.88, \"Memory in Mb\": 0.1155691146850586, \"Time in s\": 325.23865100000006 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999221486959906, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1243124008178711, \"Time in s\": 348.4004960000001 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999249291518872, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1243124008178711, \"Time in s\": 372.3442280000001 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9999275178487298, \"F1\": 0.8666666666666666, \"Memory in Mb\": 0.1248159408569336, \"Time in s\": 397.0676850000001 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998073183975896, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1517705917358398, \"Time in s\": 422.5782570000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998135340385136, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1512670516967773, \"Time in s\": 448.87363900000014 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998193611955004, \"F1\": 0.7317073170731707, \"Memory in Mb\": 0.1517705917358398, \"Time in s\": 475.95484300000015 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997929870378036, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 503.8239590000002 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997990757484428, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 532.4818360000002 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998048165275358, \"F1\": 0.6976744186046512, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 
561.9301430000002 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998102383698017, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 592.1790100000002 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99981536713535, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 623.2264690000002 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998202259621368, \"F1\": 0.7234042553191489, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 655.0760590000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998113614314972, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 687.7245370000002 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999816077457665, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 721.1718600000002 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998205634308271, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 755.4176530000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998248357835472, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 790.4648560000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998289094197584, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1605138778686523, \"Time in s\": 826.3125100000002 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998327978884762, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1610174179077148, \"Time in s\": 862.9590210000002 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999836513534344, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1653127670288086, \"Time in s\": 900.4018990000002 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9998400676285456, \"F1\": 0.7083333333333333, \"Memory in Mb\": 0.1648092269897461, \"Time in s\": 938.635169 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9997875670840788, \"F1\": 0.6415094339622641, \"Memory in Mb\": 0.1745138168334961, \"Time in s\": 977.660251 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999781045071872, \"F1\": 0.6296296296296297, \"Memory in Mb\": 0.1745138168334961, \"Time in s\": 1017.476761 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996782703815712, \"F1\": 0.53125, \"Memory in Mb\": 0.1740102767944336, \"Time in s\": 1058.085363 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"Voting\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9996847050415664, \"F1\": 0.53125, \"Memory in Mb\": 0.1740102767944336, \"Time in s\": 1099.4855200000002 }, 
{ \"step\": 106, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5333333333333333, \"F1\": 0.5242718446601942, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.003183 }, { \"step\": 212, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5876777251184834, \"F1\": 0.5538461538461539, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.008041 }, { \"step\": 318, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5457413249211357, \"F1\": 0.5102040816326531, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0146489999999999 }, { \"step\": 424, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5460992907801419, \"F1\": 0.5025906735751295, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0229629999999999 }, { \"step\": 530, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5671077504725898, \"F1\": 0.5096359743040686, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.032934 }, { \"step\": 636, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5464566929133858, \"F1\": 0.4875444839857651, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.044653 }, { \"step\": 742, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5573549257759784, \"F1\": 0.4875, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.058023 }, { \"step\": 848, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5501770956316411, \"F1\": 0.4816326530612245, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.073029 }, { \"step\": 954, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5487932843651626, \"F1\": 0.4794188861985472, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.08975 }, { \"step\": 1060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5448536355051936, \"F1\": 0.4679911699779249, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.108101 }, { \"step\": 1166, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.534763948497854, \"F1\": 0.4590818363273453, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.128117 }, { \"step\": 1272, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5287175452399685, \"F1\": 0.456935630099728, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.149836 }, { \"step\": 1378, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5286855482933914, \"F1\": 0.4523206751054852, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.173181 }, { \"step\": 1484, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5252865812542145, \"F1\": 0.4491392801251955, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.19815 }, { \"step\": 1590, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", 
\"dataset\": \"Bananas\", \"Accuracy\": 0.5204531151667715, \"F1\": 0.4437956204379563, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.224847 }, { \"step\": 1696, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5227138643067847, \"F1\": 0.4455106237148732, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.253189 }, { \"step\": 1802, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.524153248195447, \"F1\": 0.4523961661341854, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.283166 }, { \"step\": 1908, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5233350812794966, \"F1\": 0.456664674237896, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3148919999999999 }, { \"step\": 2014, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5171385991058122, \"F1\": 0.4563758389261745, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3482999999999999 }, { \"step\": 2120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5143935818782445, \"F1\": 0.4581358609794628, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3833669999999999 }, { \"step\": 2226, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5114606741573033, \"F1\": 0.4545910687405921, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4201549999999999 }, { \"step\": 2332, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510939510939511, \"F1\": 0.4550669216061185, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4585819999999999 }, { \"step\": 2438, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5104636848584325, \"F1\": 0.4530032095369097, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4986469999999999 }, { \"step\": 2544, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5084545812033032, \"F1\": 0.4546247818499127, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.5404629999999999 }, { \"step\": 2650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5096262740656852, \"F1\": 0.458072590738423, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.5839479999999999 }, { \"step\": 2756, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5092558983666061, \"F1\": 0.4574638844301765, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.6291049999999999 }, { \"step\": 2862, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5103110800419434, \"F1\": 0.4563445867287544, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.6760149999999999 }, { \"step\": 2968, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5133131108864173, \"F1\": 0.457957957957958, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7245799999999999 }, { \"step\": 3074, \"track\": \"Binary classification\", \"model\": \"[baseline] Last 
Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099251545720794, \"F1\": 0.4563176895306859, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7747889999999998 }, { \"step\": 3180, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102233406731677, \"F1\": 0.4538758330410382, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.8267639999999998 }, { \"step\": 3286, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5095890410958904, \"F1\": 0.4522271336280176, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.8803849999999999 }, { \"step\": 3392, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5107637864936597, \"F1\": 0.4558871761233191, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.9357289999999998 }, { \"step\": 3498, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5124392336288247, \"F1\": 0.4557931694861155, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.9927189999999998 }, { \"step\": 3604, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5134610047182903, \"F1\": 0.4544039838157485, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.0513469999999998 }, { \"step\": 3710, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5122674575357239, \"F1\": 0.4546276756104914, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1117049999999995 }, { \"step\": 3816, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.510615989515072, \"F1\": 0.4536142815335089, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1737269999999995 }, { \"step\": 3922, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5090538128028564, \"F1\": 0.4507845934379457, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.2373829999999997 }, { \"step\": 4028, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5108020859200397, \"F1\": 0.452473596442468, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.3028159999999998 }, { \"step\": 4134, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102830873457537, \"F1\": 0.4517876489707476, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.3698969999999997 }, { \"step\": 4240, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5102618542108988, \"F1\": 0.4525316455696203, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.4386049999999997 }, { \"step\": 4346, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5074798619102416, \"F1\": 0.4490216271884655, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5090079999999997 }, { \"step\": 4452, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099977533138621, \"F1\": 0.4513207547169811, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5810569999999995 }, { \"step\": 4558, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099846390168971, \"F1\": 0.4539007092198581, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.6547379999999996 }, { \"step\": 4664, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099721209521767, \"F1\": 0.4553039332538737, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.7301709999999997 }, { \"step\": 4770, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5110085971901867, \"F1\": 0.4556489262371615, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.807246 }, { \"step\": 4876, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5109743589743589, \"F1\": 0.4539624370132845, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.885955 }, { \"step\": 4982, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099377635013049, \"F1\": 0.453792794808682, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.9663889999999995 }, { \"step\": 5088, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5099272655789266, \"F1\": 0.4536489151873767, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.0484569999999995 }, { \"step\": 5194, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5097246293086848, \"F1\": 0.4531786941580756, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.1321769999999995 }, { \"step\": 5300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Bananas\", \"Accuracy\": 0.5095301000188714, \"F1\": 0.4529572721532309, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.2176369999999994 }, { \"step\": 906, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530386740331491, \"F1\": 0.8500563697857948, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.021647 }, { \"step\": 1812, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8619547211485368, \"F1\": 0.8287671232876712, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.064308 }, { \"step\": 2718, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8450496871549503, \"F1\": 0.80958842152872, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.127937 }, { \"step\": 3624, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8418437758763456, \"F1\": 0.8056968463886063, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.212647 }, { \"step\": 4530, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8388165157871494, \"F1\": 0.7960893854748604, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.318513 }, { \"step\": 5436, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8413983440662374, \"F1\": 0.7995348837209302, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.445933 }, { \"step\": 6342, \"track\": \"Binary classification\", \"model\": \"[baseline] Last 
Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8370919413341744, \"F1\": 0.7958094485076103, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.594503 }, { \"step\": 7248, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8359321098385539, \"F1\": 0.7948231233822259, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.763985 }, { \"step\": 8154, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8352753587636453, \"F1\": 0.8021799970540581, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.954384 }, { \"step\": 9060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8358538470029805, \"F1\": 0.8069081937410726, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.166026 }, { \"step\": 9966, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8372303060712494, \"F1\": 0.8118765947575969, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.399073 }, { \"step\": 10872, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8368135406126391, \"F1\": 0.8140461215932915, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.653093 }, { \"step\": 11778, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8374798335739153, \"F1\": 0.8150724637681159, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.928122 }, { \"step\": 12684, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8384451628163684, \"F1\": 0.8161177420802298, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.224333 }, { \"step\": 13590, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.842004562513798, \"F1\": 0.8223417459660736, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.541433 }, { \"step\": 14496, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8448430493273542, \"F1\": 0.8264794383149447, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 2.879854 }, { \"step\": 15402, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8460489578598792, \"F1\": 0.8270983738058776, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.239289 }, { \"step\": 16308, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.844851904090268, \"F1\": 0.8251313243019076, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.619613 }, { \"step\": 17214, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8443618195549875, \"F1\": 0.8222177981286084, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.020822 }, { \"step\": 18120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8450797505381091, \"F1\": 0.8227792158595871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.443085 }, { \"step\": 19026, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8462023653088042, \"F1\": 0.8224083515416363, \"Memory in Mb\": 
0.0005102157592773, \"Time in s\": 4.886686 }, { \"step\": 19932, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.847523957653906, \"F1\": 0.8255753888538139, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.35136 }, { \"step\": 20838, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.84661899505687, \"F1\": 0.8249917862227577, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.836996 }, { \"step\": 21744, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8452835395299637, \"F1\": 0.8209495422610177, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.343734 }, { \"step\": 22650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8444081416398075, \"F1\": 0.8188733552631579, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.871264 }, { \"step\": 23556, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8451284228401613, \"F1\": 0.8194595664654062, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.419847 }, { \"step\": 24462, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8464903315481788, \"F1\": 0.8198781599270878, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.989367 }, { \"step\": 25368, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8462963692986951, \"F1\": 0.8199492034172247, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 8.579944 }, { \"step\": 26274, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8477524454763445, \"F1\": 0.8213168944876262, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.191269 }, { \"step\": 27180, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8495529636851982, \"F1\": 0.8240457851026293, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.823483 }, { \"step\": 28086, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8509880719245149, \"F1\": 0.825107610012955, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 10.476909 }, { \"step\": 28992, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8521265220240765, \"F1\": 0.8258237516759436, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.151512 }, { \"step\": 29898, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8531959728400843, \"F1\": 0.8268160833366216, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.847222 }, { \"step\": 30804, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8537480115573158, \"F1\": 0.8267107743201139, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 12.564065 }, { \"step\": 31710, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8530385694913116, \"F1\": 0.8259895444361464, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 13.301932 }, { \"step\": 32616, \"track\": \"Binary classification\", 
\"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8536869538555879, \"F1\": 0.8269760696156635, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.060873999999998 }, { \"step\": 33522, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8541511291429253, \"F1\": 0.8276032300151628, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.840823 }, { \"step\": 34428, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8549684840386905, \"F1\": 0.8286724084685859, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 15.641785 }, { \"step\": 35334, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8555175048821215, \"F1\": 0.8284321962695346, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 16.463894999999997 }, { \"step\": 36240, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8545213720025387, \"F1\": 0.8259146744155329, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 17.307088999999998 }, { \"step\": 37146, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.854354556467896, \"F1\": 0.8252696854208386, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 18.171425 }, { \"step\": 38052, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8545636119944285, \"F1\": 0.8247736052181622, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.056747 }, { \"step\": 38958, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8548142824139435, \"F1\": 0.8254213223038459, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.962946 }, { \"step\": 39864, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8546521837292728, \"F1\": 0.8262981172802495, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 20.890172 }, { \"step\": 40770, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8540067207927592, \"F1\": 0.8267652366261132, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 21.838151 }, { \"step\": 41676, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8537012597480504, \"F1\": 0.8274320002264302, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 22.807005 }, { \"step\": 42582, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8536201592259458, \"F1\": 0.8277177368086459, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 23.796527 }, { \"step\": 43488, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.853473451836181, \"F1\": 0.8276626818845675, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 24.806653 }, { \"step\": 44394, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", \"Accuracy\": 0.8533777847858897, \"F1\": 0.8271686890948196, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 25.837451 }, { \"step\": 45300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Elec2\", 
\"Accuracy\": 0.8533521711296055, \"F1\": 0.8273155007928462, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 26.888684 }, { \"step\": 25, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.625, \"F1\": 0.64, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.002343 }, { \"step\": 50, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.6530612244897959, \"F1\": 0.6222222222222223, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.006017 }, { \"step\": 75, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5675675675675675, \"F1\": 0.5555555555555556, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.0109819999999999 }, { \"step\": 100, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5555555555555556, \"F1\": 0.5416666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.017228 }, { \"step\": 125, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5241935483870968, \"F1\": 0.5123966942148761, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.024779 }, { \"step\": 150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5234899328859061, \"F1\": 0.5298013245033113, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.033621 }, { \"step\": 175, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5229885057471264, \"F1\": 0.496969696969697, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.043754 }, { \"step\": 200, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.507537688442211, \"F1\": 0.4787234042553192, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.055183 }, { \"step\": 225, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5, \"F1\": 0.4509803921568627, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.067905 }, { \"step\": 250, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5180722891566265, \"F1\": 0.4782608695652174, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.082 }, { \"step\": 275, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5218978102189781, \"F1\": 0.4738955823293172, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.097411 }, { \"step\": 300, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5217391304347826, \"F1\": 0.460377358490566, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.114126 }, { \"step\": 325, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5216049382716049, \"F1\": 0.4483985765124554, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.132158 }, { \"step\": 350, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5329512893982808, \"F1\": 0.4511784511784511, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.151512 }, { 
\"step\": 375, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5267379679144385, \"F1\": 0.4380952380952381, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.172161 }, { \"step\": 400, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5263157894736842, \"F1\": 0.4324324324324324, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.194113 }, { \"step\": 425, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5424528301886793, \"F1\": 0.436046511627907, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.217363 }, { \"step\": 450, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5367483296213809, \"F1\": 0.4222222222222222, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.241916 }, { \"step\": 475, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5358649789029536, \"F1\": 0.4329896907216494, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.267769 }, { \"step\": 500, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5370741482965932, \"F1\": 0.4460431654676259, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.2950149999999999 }, { \"step\": 525, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5400763358778626, \"F1\": 0.4382284382284382, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.323563 }, { \"step\": 550, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5391621129326047, \"F1\": 0.4415011037527593, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.3534009999999999 }, { \"step\": 575, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5418118466898955, \"F1\": 0.4416135881104034, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.384549 }, { \"step\": 600, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5509181969949917, \"F1\": 0.443064182194617, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4169989999999999 }, { \"step\": 625, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5560897435897436, \"F1\": 0.4358452138492871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4507429999999999 }, { \"step\": 650, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.551617873651772, \"F1\": 0.4393063583815029, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.4857869999999999 }, { \"step\": 675, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5459940652818991, \"F1\": 0.4436363636363636, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.522164 }, { \"step\": 700, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5464949928469242, \"F1\": 0.4389380530973452, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.55984 }, { \"step\": 725, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5441988950276243, \"F1\": 0.4463087248322148, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.598852 }, { \"step\": 750, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5367156208277704, \"F1\": 0.4412238325281803, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.639119 }, { \"step\": 775, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5310077519379846, \"F1\": 0.4336973478939157, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.680612 }, { \"step\": 800, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5294117647058824, \"F1\": 0.4388059701492537, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.723331 }, { \"step\": 825, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5266990291262136, \"F1\": 0.4396551724137931, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.7672859999999999 }, { \"step\": 850, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5241460541813898, \"F1\": 0.4341736694677871, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.812452 }, { \"step\": 875, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.522883295194508, \"F1\": 0.4311050477489768, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.858842 }, { \"step\": 900, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5272525027808677, \"F1\": 0.4340878828229028, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.906455 }, { \"step\": 925, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5227272727272727, \"F1\": 0.4338896020539153, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 0.955289 }, { \"step\": 950, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5205479452054794, \"F1\": 0.438964241676942, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.005349 }, { \"step\": 975, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5174537987679672, \"F1\": 0.4337349397590361, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.056718 }, { \"step\": 1000, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5185185185185185, \"F1\": 0.4361078546307151, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.109312 }, { \"step\": 1025, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.517578125, \"F1\": 0.4386363636363636, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.1631 }, { \"step\": 1050, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5138226882745471, \"F1\": 0.4370860927152318, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.218179 }, { \"step\": 1075, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", 
\"Accuracy\": 0.5111731843575419, \"F1\": 0.4372990353697749, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.274568 }, { \"step\": 1100, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5122838944494995, \"F1\": 0.4393305439330544, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.332245 }, { \"step\": 1125, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5124555160142349, \"F1\": 0.4453441295546558, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.391212 }, { \"step\": 1150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5143603133159269, \"F1\": 0.4464285714285714, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.451489 }, { \"step\": 1175, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5187393526405452, \"F1\": 0.4509232264334305, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.513052 }, { \"step\": 1200, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5187656380316931, \"F1\": 0.448901623686724, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.5759610000000002 }, { \"step\": 1225, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5171568627450981, \"F1\": 0.4471468662301216, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.640178 }, { \"step\": 1250, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Phishing\", \"Accuracy\": 0.5156124899919936, \"F1\": 0.4474885844748858, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 1.705799 }, { \"step\": 1903, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.070354 }, { \"step\": 3806, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.210001 }, { \"step\": 5709, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.417744 }, { \"step\": 7612, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 0.694871 }, { \"step\": 9515, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.039777 }, { \"step\": 11418, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.454499 }, { \"step\": 13321, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 1.0, \"F1\": 0.0, \"Memory in Mb\": 0.0004835128784179, \"Time in s\": 1.938249 }, { \"step\": 15224, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9985548183669448, \"F1\": 0.0, \"Memory in Mb\": 
0.0005102157592773, \"Time in s\": 2.491975 }, { \"step\": 17127, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9984818404764684, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.11529 }, { \"step\": 19030, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9986336644069578, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 3.807739 }, { \"step\": 20933, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9987578826676858, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 4.570752 }, { \"step\": 22836, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9988613969783228, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 5.403311 }, { \"step\": 24739, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9989489853666425, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 6.304936 }, { \"step\": 26642, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9989489884013364, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 7.275472 }, { \"step\": 28545, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9990190582959642, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 8.315878999999999 }, { \"step\": 30448, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999080369166092, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 9.425081 }, { \"step\": 32351, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9991344667697064, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 10.602661 }, { \"step\": 34254, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999182553352991, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 11.849262 }, { \"step\": 36157, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992255780506694, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 13.164257 }, { \"step\": 38060, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992643001655324, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 14.54421 }, { \"step\": 39963, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9992993343676492, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 15.991924 }, { \"step\": 41866, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993311835662247, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 17.508838 }, { \"step\": 43769, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993602632059952, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 19.096664 }, { \"step\": 45672, \"track\": \"Binary 
classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9993869194893916, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 20.752142 }, { \"step\": 47575, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994114432252912, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 22.476952 }, { \"step\": 49478, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99943408048184, \"F1\": 0.0, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 24.271157 }, { \"step\": 51381, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99941611521993, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 26.132842000000004 }, { \"step\": 53284, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994369686391532, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 28.063918000000005 }, { \"step\": 55187, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994563838654732, \"F1\": 0.0625, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 30.065042000000005 }, { \"step\": 57090, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994394717020793, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 32.13415200000001 }, { \"step\": 58993, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994575535665852, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 34.27360200000001 }, { \"step\": 60896, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745052960012, \"F1\": 0.36, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 36.48320000000001 }, { \"step\": 62799, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994585814834868, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 38.76173800000001 }, { \"step\": 64702, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745058036196, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 41.10958900000001 }, { \"step\": 66605, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99948952014894, \"F1\": 0.3703703703703703, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 43.52762700000001 }, { \"step\": 68508, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994745062548352, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 46.01474500000001 }, { \"step\": 70411, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9994887089902004, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 48.570884000000014 }, { \"step\": 72314, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995021642028404, \"F1\": 0.3793103448275862, 
\"Memory in Mb\": 0.0005102157592773, \"Time in s\": 51.19747000000002 }, { \"step\": 74217, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995149293952786, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 53.894680000000015 }, { \"step\": 76120, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99952705631971, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 56.660361000000016 }, { \"step\": 78023, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.99953859167927, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 59.49662100000002 }, { \"step\": 79926, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999549577729121, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 62.40230700000002 }, { \"step\": 81829, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995600527936648, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 65.37601400000003 }, { \"step\": 83732, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995700517132244, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 68.41937500000003 }, { \"step\": 85635, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995796062311698, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 71.53305000000003 }, { \"step\": 87538, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999588745330546, \"F1\": 0.3793103448275862, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 74.71604400000002 }, { \"step\": 89441, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995751341681576, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 77.96763000000003 }, { \"step\": 91344, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.9995839856365568, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 81.28881300000003 }, { \"step\": 93247, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999592475816657, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 84.68057600000003 }, { \"step\": 95150, \"track\": \"Binary classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"SMTP\", \"Accuracy\": 0.999600626385984, \"F1\": 0.3666666666666666, \"Memory in Mb\": 0.0005102157592773, \"Time in s\": 88.14010200000003 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"Bananas\", \"bind\": { \"input\": \"select\", \"options\": [ \"Bananas\", \"Elec2\", \"Phishing\", \"SMTP\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": 
\"Dataset\" } } } ], \"repeat\": { \"row\": [ \"Accuracy\", \"F1\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, \"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Binary%20classification/#datasets","title":"Datasets","text":"Bananas

    Bananas dataset.

    An artificial dataset whose instances belong to several clusters with a banana shape. There are two attributes that correspond to the x and y axes, respectively.

    Name      Bananas
    Task      Binary classification
    Samples   5,300
    Features  2
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/banana.zip
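
    River datasets such as Bananas are plain Python iterables that yield one (features, label) pair at a time. A minimal sketch of streaming over the dataset (assuming a standard River install; the print is purely illustrative):

    ```python
    from river import datasets

    # Bananas yields (x, y) pairs: x is a dict holding the two
    # numeric features, y is a boolean label.
    dataset = datasets.Bananas()

    for x, y in dataset:
        print(x, y)
        break  # inspect just the first instance
    ```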

    Elec2

    Electricity prices in New South Wales.

    This is a binary classification task, where the goal is to predict if the price of electricity will go up or down.

    This data was collected from the Australian New South Wales Electricity Market. In this market, prices are not fixed; they are affected by supply and demand, and are set every five minutes. Electricity transfers to and from the neighboring state of Victoria were carried out to alleviate fluctuations.

    Name        Elec2
    Task        Binary classification
    Samples     45,312
    Features    8
    Sparse      False
    Path        /home/kulbach/river_data/Elec2/electricity.csv
    URL         https://maxhalford.github.io/files/datasets/electricity.zip
    Size        2.95 MB
    Downloaded  True

    Phishing

    Phishing websites.

    This dataset contains features from web pages that are classified as phishing or not.

    Name      Phishing
    Task      Binary classification
    Samples   1,250
    Features  9
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/phishing.csv.gz

    SMTP

    SMTP dataset from the KDD 1999 cup.

    The goal is to predict whether or not an SMTP connection is anomalous. The dataset contains only 2,211 (0.4%) positive labels.

    Name        SMTP
    Task        Binary classification
    Samples     95,156
    Features    3
    Sparse      False
    Path        /home/kulbach/river_data/SMTP/smtp.csv
    URL         https://maxhalford.github.io/files/datasets/smtp.zip
    Size        5.23 MB
    Downloaded  True
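
    The step-wise Accuracy, F1, memory, and time figures recorded above come from progressive validation: each model predicts on an instance before learning from it. A sketch of that protocol with river.evaluate (the "[baseline] Last Class" model presumably maps to dummy.NoChangeClassifier; the exact benchmark harness is not reproduced here):

    ```python
    from river import datasets, dummy, evaluate, metrics

    # Test-then-train evaluation: predict first, then learn, one
    # instance at a time, while tracking several metrics at once.
    evaluate.progressive_val_score(
        dataset=datasets.Phishing(),
        model=dummy.NoChangeClassifier(),  # always predicts the last seen label
        metric=metrics.Accuracy() + metrics.F1(),
        print_every=100,   # report running metrics every 100 instances
        show_time=True,    # runtime, as in the "Time in s" column
        show_memory=True,  # memory footprint, as in the "Memory in Mb" column
    )
    ```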

    "},{"location":"benchmarks/Binary%20classification/#models","title":"Models","text":"Logistic regression

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LogisticRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.005\n      )\n    )\n    loss=Log (\n      weight_pos=1.\n      weight_neg=1.\n    )\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)
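
    The repr above corresponds to a standard scaler piped into a logistic regression trained by SGD with a constant learning rate of 0.005. A sketch of building the same pipeline (all other parameters left at their defaults):

    ```python
    from river import compose, linear_model, optim, preprocessing

    # StandardScaler followed by logistic regression, matching the
    # hyperparameters listed above.
    model = compose.Pipeline(
        preprocessing.StandardScaler(),
        linear_model.LogisticRegression(optimizer=optim.SGD(lr=0.005)),
    )
    ```

    The `|` operator (`preprocessing.StandardScaler() | linear_model.LogisticRegression(...)`) builds the same pipeline.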

    ALMA

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  ALMAClassifier (\n    p=2\n    alpha=0.9\n    B=1.111111\n    C=1.414214\n  )\n)

    sklearn SGDClassifier

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SKL2RiverClassifier (\n    estimator=SGDClassifier(eta0=0.005, learning_rate='constant', loss='log', penalty='none')\n    classes=[False, True]\n  )\n)

    Vowpal Wabbit logistic regression

    VW2RiverClassifier ()

    Naive Bayes

    GaussianNB ()

    Hoeffding Tree

    HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)
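
    A sketch of running this Hoeffding tree in the test-then-train loop that the benchmark metrics imply (hyperparameters copied from the repr above, everything else left at its default):

    ```python
    from river import datasets, tree

    model = tree.HoeffdingTreeClassifier(grace_period=200, delta=1e-07)

    for x, y in datasets.Bananas():
        y_pred = model.predict_one(x)  # predict before seeing the label
        model.learn_one(x, y)          # then update the tree incrementally
    ```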

    Hoeffding Adaptive Tree

    HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=True\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=42\n)

    Adaptive Random Forest

    []

    Streaming Random Patches

    SRPClassifier (\n  model=HoeffdingTreeClassifier (\n    grace_period=50\n    max_depth=inf\n    split_criterion=\"info_gain\"\n    delta=0.01\n    tau=0.05\n    leaf_prediction=\"nba\"\n    nb_threshold=0\n    nominal_attributes=None\n    splitter=GaussianSplitter (\n      n_splits=10\n    )\n    binary_split=False\n    max_size=100.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n  n_models=10\n  subspace_size=0.6\n  training_method=\"patches\"\n  lam=6\n  drift_detector=ADWIN (\n    delta=1e-05\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  warning_detector=ADWIN (\n    delta=0.0001\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  disable_detector=\"off\"\n  disable_weighted_vote=False\n  seed=None\n  metric=Accuracy (\n    cm=ConfusionMatrix (\n      classes=[]\n    )\n  )\n)

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)\n

    ADWIN Bagging

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
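
    The ten identical Hoeffding trees listed above are the members of an ADWIN bagging ensemble. A sketch of how such an ensemble can be instantiated (n_models=10 is inferred from the list; the benchmark's seed is not shown, so the one below is illustrative):

    ```python
    from river import ensemble, tree

    # Online bagging where each member is monitored by an ADWIN drift
    # detector and replaced when drift is detected.
    model = ensemble.ADWINBaggingClassifier(
        model=tree.HoeffdingTreeClassifier(grace_period=200, delta=1e-07),
        n_models=10,
        seed=42,  # illustrative value
    )
    ```

    The AdaBoost and Bagging entries below follow the same pattern, using ensemble.AdaBoostClassifier and ensemble.BaggingClassifier respectively.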


    AdaBoost

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
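    The same ensemble can be reconstructed from river's public API. A minimal sketch, assuming the river 0.19 API (the seed is an assumption; it is not part of the dump above):

    # Sketch: an AdaBoost ensemble of ten Hoeffding trees, mirroring the dump above.
    from river import ensemble, tree

    adaboost = ensemble.AdaBoostClassifier(
        model=tree.HoeffdingTreeClassifier(
            grace_period=200,
            split_criterion="info_gain",
            delta=1e-07,
            leaf_prediction="nba",
        ),
        n_models=10,
        seed=42,  # assumed seed; not recorded in the dump
    )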

    Bagging

    [HoeffdingAdaptiveTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      bootstrap_sampling=False
      drift_window_threshold=300
      drift_detector=ADWIN (
        delta=0.002
        clock=32
        max_buckets=5
        min_window_length=5
        grace_period=10
      )
      switch_significance=0.05
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
      seed=None
    )] (a list of 10 identical HoeffdingAdaptiveTreeClassifier models)

    Leveraging Bagging

    [HoeffdingTreeClassifier (…)] (a list of 10 HoeffdingTreeClassifier models, each with the same default configuration as the AdaBoost base models above)
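    Both bagging variants wrap a base tree the same way. A minimal sketch, again assuming the river 0.19 API:

    # Sketch: online bagging over adaptive Hoeffding trees, and leveraging bagging
    # over plain Hoeffding trees, as configured in the two dumps above.
    from river import ensemble, tree

    bagging = ensemble.BaggingClassifier(
        model=tree.HoeffdingAdaptiveTreeClassifier(drift_window_threshold=300),
        n_models=10,
    )
    leveraging = ensemble.LeveragingBaggingClassifier(
        model=tree.HoeffdingTreeClassifier(),
        n_models=10,
    )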

    Stacking

    [Pipeline (
      StandardScaler (
        with_std=True
      ),
      SoftmaxRegression (
        optimizer=SGD (
          lr=Constant (
            learning_rate=0.01
          )
        )
        loss=CrossEntropy (
          class_weight={}
        )
        l2=0
      )
    ), GaussianNB (), HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), Pipeline (
      StandardScaler (
        with_std=True
      ),
      KNNClassifier (
        n_neighbors=5
        window_size=100
        min_distance_keep=0.
        weighted=True
        cleanup_every=0
        distance_func=functools.partial(…, p=2)
        softmax=False
      )
    )]

    Voting

    VotingClassifier (
      models=[the same four models as in the Stacking ensemble above]
      use_probabilities=True
    )
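    Stacking and Voting share the same four base models. A minimal sketch, assuming the river 0.19 API; the meta-classifier passed to the stacker is an assumption, since it does not appear in the dump:

    # Sketch: the four shared base models, combined once by voting and once by stacking.
    from river import ensemble, linear_model, naive_bayes, neighbors, preprocessing, tree

    base_models = [
        preprocessing.StandardScaler() | linear_model.SoftmaxRegression(),
        naive_bayes.GaussianNB(),
        tree.HoeffdingTreeClassifier(),
        preprocessing.StandardScaler() | neighbors.KNNClassifier(n_neighbors=5),
    ]

    # Clone the base models so the two ensembles do not share mutable state.
    voting = ensemble.VotingClassifier(
        [m.clone() for m in base_models], use_probabilities=True
    )
    stacking = ensemble.StackingClassifier(
        [m.clone() for m in base_models],
        meta_classifier=linear_model.SoftmaxRegression(),  # assumed meta-learner
    )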

    [baseline] Last Class

    NoChangeClassifier ()

    "},{"location":"benchmarks/Binary%20classification/#environment","title":"Environment","text":"
    Python implementation: CPython
    Python version       : 3.11.5
    IPython version      : 8.15.0

    river       : 0.19.0
    numpy       : 1.26.0
    scikit-learn: 1.3.0
    pandas      : 2.1.0
    scipy       : 1.11.2

    Compiler    : GCC 11.4.0
    OS          : Linux
    Release     : 6.2.0-1011-azure
    Machine     : x86_64
    Processor   : x86_64
    CPU cores   : 2
    Architecture: 64bit
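    The report above can be reproduced in plain Python when the same packages are installed; a minimal sketch that only mirrors its layout (the tool that generated the original is not shown):

    # Sketch: print interpreter and package versions in the same layout as above.
    import platform

    import numpy, pandas, river, scipy, sklearn

    print(f"Python implementation: {platform.python_implementation()}")
    print(f"Python version       : {platform.python_version()}")
    for pkg in (river, numpy, sklearn, pandas, scipy):
        print(f"{pkg.__name__:<12}: {pkg.__version__}")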
    "},{"location":"benchmarks/Multiclass%20classification/","title":"Multiclass classification","text":"TableChart Model Dataset Accuracy MicroF1 MacroF1 Memory in Mb Time in s ADWIN Bagging ImageSegments 0.777729 0.777729 0.764912 4.14768 482.736 ADWIN Bagging Insects 0.579424 0.579424 0.570136 15.4446 12525.9 ADWIN Bagging Keystroke 0.805824 0.805824 0.80625 32.1812 8923.61 AdaBoost ImageSegments 0.805133 0.805133 0.798078 4.12853 438.219 AdaBoost Insects 0.554082 0.554082 0.543927 28.2902 12481.3 AdaBoost Keystroke 0.842492 0.842492 0.843635 177.385 12366.9 Adaptive Random Forest ImageSegments 0.819052 0.819052 0.814425 4.66081 227.541 Adaptive Random Forest Insects 0.744257 0.744257 0.741932 0.369647 4404.71 Adaptive Random Forest Keystroke 0.969851 0.969851 0.969867 2.33717 937.846 Bagging ImageSegments 0.77686 0.77686 0.764461 4.18729 482.036 Bagging Insects 0.606053 0.606053 0.598222 3.75006 14067.2 Bagging Keystroke 0.667974 0.667974 0.668853 50.4872 13509.1 Hoeffding Adaptive Tree ImageSegments 0.774685 0.774685 0.763496 0.425819 53.9974 Hoeffding Adaptive Tree Insects 0.611962 0.611962 0.602993 0.147679 1507.07 Hoeffding Adaptive Tree Keystroke 0.723712 0.723712 0.722393 0.727901 1274.73 Hoeffding Tree ImageSegments 0.77599 0.77599 0.763027 0.419177 39.4879 Hoeffding Tree Insects 0.537018 0.537018 0.527071 2.5392 921.351 Hoeffding Tree Keystroke 0.648218 0.648218 0.647249 5.09806 914.037 Leveraging Bagging ImageSegments 0.778164 0.778164 0.765914 4.13275 1135.16 Leveraging Bagging Insects 0.691547 0.691547 0.686411 18.1413 32334.1 Leveraging Bagging Keystroke 0.95039 0.95039 0.950468 10.4201 7265.02 Naive Bayes ImageSegments 0.731622 0.731622 0.730042 0.390004 38.4724 Naive Bayes Insects 0.506847 0.506847 0.493003 0.611693 557.606 Naive Bayes Keystroke 0.652532 0.652532 0.651577 4.86901 473.747 Stacking ImageSegments 0.849065 0.849065 0.847922 5.29567 399.289 Stacking Insects 0.752154 0.752154 0.750251 11.339 9741.14 Stacking Keystroke 0.976518 0.976518 0.976517 12.2203 4556.33 Streaming Random Patches ImageSegments 0.754676 0.754676 0.752727 10.4257 832.07 Streaming Random Patches Insects 0.739578 0.739578 0.737512 8.34194 26942.3 Streaming Random Patches Keystroke 0.953233 0.953233 0.953239 74.5521 5886.48 Voting ImageSegments 0.803393 0.803393 0.794975 0.951658 146.236 Voting Insects 0.647929 0.647929 0.635943 3.38862 3141.99 Voting Keystroke 0.793274 0.793274 0.798424 10.3088 2173.75 [baseline] Last Class ImageSegments 0.14789 0.14789 0.147887 0.00136757 2.67732 [baseline] Last Class Insects 0.289115 0.289115 0.289295 0.00138664 59.1503 [baseline] Last Class Keystroke 0.997549 0.997549 0.997549 0.00504208 24.227 k-Nearest Neighbors ImageSegments 0.819922 0.819922 0.815895 0.12676 38.8794 k-Nearest Neighbors Insects 0.686547 0.686547 0.683661 0.216656 1254.78 k-Nearest Neighbors Keystroke 0.984509 0.984509 0.984508 0.214242 515.415


    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4666666666666667, \"MicroF1\": 0.4666666666666667, \"MacroF1\": 0.4009102009102009, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.163216 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5604395604395604, \"MicroF1\": 0.5604395604395604, \"MacroF1\": 0.5279334700387331, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.349738 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5474452554744526, \"MicroF1\": 0.5474452554744526, \"MacroF1\": 0.5191892873237388, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 0.5584899999999999 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5573770491803278, \"MicroF1\": 0.5573770491803278, \"MacroF1\": 0.5225713529323662, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 0.789485 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5545851528384279, \"MicroF1\": 0.5545851528384279, \"MacroF1\": 0.5217226223148511, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 1.042858 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.56, \"MicroF1\": 0.56, \"MacroF1\": 0.5450388711329708, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 1.324703 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5825545171339563, \"MicroF1\": 0.5825545171339563, \"MacroF1\": 0.5566705826058684, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 1.637036 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5940054495912807, \"MicroF1\": 0.5940054495912807, \"MacroF1\": 0.5613773296963412, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 1.979491 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5980629539951574, \"MicroF1\": 0.5980629539951574, \"MacroF1\": 0.5624927052752284, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 2.352111 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.599128540305011, \"MicroF1\": 0.599128540305011, \"MacroF1\": 0.5669821167583783, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 2.754918 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6099009900990099, \"MicroF1\": 0.6099009900990099, \"MacroF1\": 0.5922286190986811, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 3.188186 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6116152450090744, \"MicroF1\": 0.6116152450090744, \"MacroF1\": 0.5983340184133136, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 3.651555 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": 
\"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6180904522613065, \"MicroF1\": 0.6180904522613065, \"MacroF1\": 0.611527101723203, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 4.145135 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6158631415241057, \"MicroF1\": 0.6158631415241057, \"MacroF1\": 0.6113311881078581, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 4.668896 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6182873730043541, \"MicroF1\": 0.6182873730043541, \"MacroF1\": 0.615018998714676, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 5.223075 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.617687074829932, \"MicroF1\": 0.617687074829932, \"MacroF1\": 0.6157912419016742, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 5.807397 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6274007682458387, \"MicroF1\": 0.6274007682458387, \"MacroF1\": 0.6216325704223051, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 6.422078 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6324062877871826, \"MicroF1\": 0.6324062877871826, \"MacroF1\": 0.6280704917469789, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 7.066915 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6426116838487973, \"MicroF1\": 0.6426116838487973, \"MacroF1\": 0.6349558095046656, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 7.742184 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6485310119695321, \"MicroF1\": 0.6485310119695321, \"MacroF1\": 0.6384515982514894, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 8.447577 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6507772020725389, \"MicroF1\": 0.6507772020725389, \"MacroF1\": 0.6399118827528387, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 9.183146 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6508407517309595, \"MicroF1\": 0.6508407517309595, \"MacroF1\": 0.6387857120889422, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 9.95137 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6537369914853358, \"MicroF1\": 0.6537369914853358, \"MacroF1\": 0.6398811322847952, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 10.747402 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.658204895738894, \"MicroF1\": 0.658204895738894, \"MacroF1\": 0.6463297068165035, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 11.559914 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6640557006092254, \"MicroF1\": 0.6640557006092254, \"MacroF1\": 
0.6508930463144657, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 12.388643000000002 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6702928870292887, \"MicroF1\": 0.6702928870292887, \"MacroF1\": 0.6599370641329335, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 13.233598000000002 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6736502820306205, \"MicroF1\": 0.6736502820306205, \"MacroF1\": 0.669511465798708, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 14.094776000000005 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6822066822066822, \"MicroF1\": 0.6822066822066822, \"MacroF1\": 0.6790074545382362, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 14.972203000000004 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6841710427606902, \"MicroF1\": 0.6841710427606902, \"MacroF1\": 0.6834974476087325, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 15.866030000000004 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6874546773023931, \"MicroF1\": 0.6874546773023931, \"MacroF1\": 0.6876766922721351, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 16.775981000000005 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6919298245614035, \"MicroF1\": 0.6919298245614035, \"MacroF1\": 0.6930786661709784, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 17.702176000000005 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.698844323589395, \"MicroF1\": 0.698844323589395, \"MacroF1\": 0.6985606658027722, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 18.644575000000003 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7027027027027027, \"MicroF1\": 0.7027027027027027, \"MacroF1\": 0.7017787722939461, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 19.603248000000004 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7056941778630839, \"MicroF1\": 0.7056941778630839, \"MacroF1\": 0.7062915374924865, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 20.578282000000005 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7078931013051585, \"MicroF1\": 0.7078931013051585, \"MacroF1\": 0.7081385387673029, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 21.573844000000005 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7093655589123867, \"MicroF1\": 0.7093655589123867, \"MacroF1\": 0.7109488618373111, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 22.586424000000004 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7101704879482658, \"MicroF1\": 0.7101704879482658, \"MacroF1\": 
0.7132092257742534, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 23.615335000000005 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7143674871207785, \"MicroF1\": 0.7143674871207784, \"MacroF1\": 0.7178399485500211, \"Memory in Mb\": 0.3899507522583008, \"Time in s\": 24.660526000000004 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7172336865588399, \"MicroF1\": 0.7172336865588399, \"MacroF1\": 0.7191260584555578, \"Memory in Mb\": 0.3899774551391601, \"Time in s\": 25.721983000000005 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7199564980967917, \"MicroF1\": 0.7199564980967917, \"MacroF1\": 0.7217017555070445, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 26.79968000000001 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7204244031830239, \"MicroF1\": 0.7204244031830238, \"MacroF1\": 0.7234495525792994, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 27.893629000000004 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7219057483169342, \"MicroF1\": 0.7219057483169342, \"MacroF1\": 0.723848351214801, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 29.003837000000004 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.723823975720789, \"MicroF1\": 0.723823975720789, \"MacroF1\": 0.725139923863974, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 30.130512000000003 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.726643598615917, \"MicroF1\": 0.726643598615917, \"MacroF1\": 0.7268553573885639, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 31.273399000000005 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7269212179797003, \"MicroF1\": 0.7269212179797003, \"MacroF1\": 0.7276782991451582, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 32.432577 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286052009456265, \"MicroF1\": 0.7286052009456266, \"MacroF1\": 0.7283656039279266, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 33.608017000000004 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7306802406293382, \"MicroF1\": 0.7306802406293383, \"MacroF1\": 0.7303992643507475, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 34.7997 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.733574988672406, \"MicroF1\": 0.733574988672406, \"MacroF1\": 0.7322842940126589, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 36.007612 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314691522414558, \"MicroF1\": 0.7314691522414558, \"MacroF1\": 0.7300322879925133, \"Memory in Mb\": 
0.3900041580200195, \"Time in s\": 37.231763 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7316224445411048, \"MicroF1\": 0.7316224445411048, \"MacroF1\": 0.7300416811383057, \"Memory in Mb\": 0.3900041580200195, \"Time in s\": 38.472431 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.623696682464455, \"MicroF1\": 0.623696682464455, \"MacroF1\": 0.5870724729616661, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 0.909568 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6148744670772146, \"MicroF1\": 0.6148744670772146, \"MacroF1\": 0.5800776869595597, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 2.67356 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6065677297126618, \"MicroF1\": 0.6065677297126618, \"MacroF1\": 0.5714781230184183, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 5.143102000000001 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043097324177126, \"MicroF1\": 0.6043097324177126, \"MacroF1\": 0.5697541737710122, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 7.993857 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6088274294373934, \"MicroF1\": 0.6088274294373934, \"MacroF1\": 0.5727560614138387, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 11.225513 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6023677979479084, \"MicroF1\": 0.6023677979479084, \"MacroF1\": 0.5679597008529512, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 14.839337 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995129211202814, \"MicroF1\": 0.5995129211202814, \"MacroF1\": 0.5652603100832261, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 18.839998 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6019888717888008, \"MicroF1\": 0.6019888717888008, \"MacroF1\": 0.5673514925692325, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 23.223853 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5993896664211301, \"MicroF1\": 0.5993896664211301, \"MacroF1\": 0.5644951651039589, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 27.990643 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5994885879344635, \"MicroF1\": 0.5994885879344635, \"MacroF1\": 0.5645655385998631, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 33.140509 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5972449418854929, \"MicroF1\": 0.5972449418854929, \"MacroF1\": 0.5631227877868952, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 38.672833 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 
0.6001894088864336, \"MicroF1\": 0.6001894088864336, \"MacroF1\": 0.5684733590606373, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 44.58831000000001 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120783856632913, \"MicroF1\": 0.6120783856632913, \"MacroF1\": 0.5935173038317552, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 50.89270200000001 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.6024487587093282, \"MicroF1\": 0.6024487587093282, \"MacroF1\": 0.5841270876002981, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 57.581734 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5676494728202538, \"MicroF1\": 0.5676494728202538, \"MacroF1\": 0.5507155080701159, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 64.65553700000001 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5418762947617638, \"MicroF1\": 0.5418762947617638, \"MacroF1\": 0.5256197352354143, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 72.114698 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5232020500250683, \"MicroF1\": 0.5232020500250683, \"MacroF1\": 0.5066898143269706, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 79.958388 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5118640500868101, \"MicroF1\": 0.5118640500868101, \"MacroF1\": 0.4926543583964285, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 88.190503 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5103922643672432, \"MicroF1\": 0.5103922643672432, \"MacroF1\": 0.4900586962359796, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 96.808684 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5115772527108291, \"MicroF1\": 0.5115772527108291, \"MacroF1\": 0.4910837640903744, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 105.81178 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5140022547914318, \"MicroF1\": 0.5140022547914318, \"MacroF1\": 0.4932541888231956, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 115.205863 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5154319659076234, \"MicroF1\": 0.5154319659076234, \"MacroF1\": 0.4943013417599926, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 124.990845 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5184254951208466, \"MicroF1\": 0.5184254951208466, \"MacroF1\": 0.4965832238311332, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 135.166218 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5225111470623052, \"MicroF1\": 0.5225111470623052, \"MacroF1\": 0.499893079239698, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 145.739141 }, { 
\"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5257396113489148, \"MicroF1\": 0.5257396113489148, \"MacroF1\": 0.5022487669255871, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 156.702601 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5301402294663996, \"MicroF1\": 0.5301402294663996, \"MacroF1\": 0.5051550433324518, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 168.057909 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5277261407877661, \"MicroF1\": 0.5277261407877661, \"MacroF1\": 0.5036945145235057, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 179.80420999999998 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5204450908107011, \"MicroF1\": 0.5204450908107011, \"MacroF1\": 0.4989008712312767, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 191.944501 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5147111648107632, \"MicroF1\": 0.5147111648107632, \"MacroF1\": 0.495826840073632, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 204.478499 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5105590454244137, \"MicroF1\": 0.5105590454244137, \"MacroF1\": 0.4941101813344875, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 217.402092 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5075607148312204, \"MicroF1\": 0.5075607148312204, \"MacroF1\": 0.4931947798921405, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 230.716201 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5044538486579266, \"MicroF1\": 0.5044538486579266, \"MacroF1\": 0.4905626123916189, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 244.420884 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5020231296811777, \"MicroF1\": 0.5020231296811777, \"MacroF1\": 0.487879842488124, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 258.51509 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4998746622844887, \"MicroF1\": 0.4998746622844887, \"MacroF1\": 0.4853435061152475, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 273.003699 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4967937444194918, \"MicroF1\": 0.4967937444194918, \"MacroF1\": 0.4819418474093529, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 287.883522 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4955938445350519, \"MicroF1\": 0.4955938445350519, \"MacroF1\": 0.4801892436835747, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 303.152298 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4940237004427836, \"MicroF1\": 
0.4940237004427836, \"MacroF1\": 0.478380783820526, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 318.807697 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.493508111745209, \"MicroF1\": 0.493508111745209, \"MacroF1\": 0.4785213801670671, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 334.85223 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4936988563242114, \"MicroF1\": 0.4936988563242114, \"MacroF1\": 0.4794201499427274, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 351.286644 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4938800634484718, \"MicroF1\": 0.4938800634484718, \"MacroF1\": 0.4802377497532936, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 368.105611 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4943757939715902, \"MicroF1\": 0.4943757939715902, \"MacroF1\": 0.4812132921167227, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 385.310693 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.494036211133909, \"MicroF1\": 0.494036211133909, \"MacroF1\": 0.4812388919618418, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 402.906414 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4944832294580131, \"MicroF1\": 0.4944832294580131, \"MacroF1\": 0.4818441874360224, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 420.888505 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4945225232981082, \"MicroF1\": 0.4945225232981082, \"MacroF1\": 0.4820791268335544, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 439.259743 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4956333256171216, \"MicroF1\": 0.4956333256171216, \"MacroF1\": 0.4833168636021498, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 458.017368 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4970869788986104, \"MicroF1\": 0.4970869788986104, \"MacroF1\": 0.4846703771634363, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 477.16088800000006 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.4987608551107171, \"MicroF1\": 0.4987608551107171, \"MacroF1\": 0.4862426724473749, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 496.692936 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5009568528419516, \"MicroF1\": 0.5009568528419516, \"MacroF1\": 0.4881725476999718, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 516.6094800000001 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5034497419940862, \"MicroF1\": 0.5034497419940862, \"MacroF1\": 0.4903712806540024, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 536.9146260000001 }, { \"step\": 52800, \"track\": 
\"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Insects\", \"Accuracy\": 0.5068467205818292, \"MicroF1\": 0.5068467205818292, \"MacroF1\": 0.4930025316136313, \"Memory in Mb\": 0.6116933822631836, \"Time in s\": 557.6057650000001 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852579852579852, \"MicroF1\": 0.9852579852579852, \"MacroF1\": 0.6962686567164179, \"Memory in Mb\": 0.1935644149780273, \"Time in s\": 0.122414 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947239263803681, \"MicroF1\": 0.947239263803681, \"MacroF1\": 0.7418606503288051, \"Memory in Mb\": 0.2889022827148437, \"Time in s\": 0.375804 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.884709730171709, \"MicroF1\": 0.884709730171709, \"MacroF1\": 0.8705899666065842, \"Memory in Mb\": 0.3842401504516601, \"Time in s\": 0.764348 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8933169834457388, \"MicroF1\": 0.8933169834457388, \"MacroF1\": 0.8791291775937072, \"Memory in Mb\": 0.4795780181884765, \"Time in s\": 1.303316 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8921039725355566, \"MicroF1\": 0.8921039725355566, \"MacroF1\": 0.8831785360852743, \"Memory in Mb\": 0.575160026550293, \"Time in s\": 2.01214 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.851655087862689, \"MicroF1\": 0.851655087862689, \"MacroF1\": 0.858198428951664, \"Memory in Mb\": 0.6704978942871094, \"Time in s\": 2.906585 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8598949211908932, \"MicroF1\": 0.8598949211908932, \"MacroF1\": 0.8469962214365345, \"Memory in Mb\": 0.7658357620239258, \"Time in s\": 3.994802 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8513637756665645, \"MicroF1\": 0.8513637756665645, \"MacroF1\": 0.8281280134770848, \"Memory in Mb\": 0.8611736297607422, \"Time in s\": 5.296884 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8422773086352493, \"MicroF1\": 0.8422773086352493, \"MacroF1\": 0.8409307955747314, \"Memory in Mb\": 0.9565114974975586, \"Time in s\": 6.831079000000001 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8367246874233881, \"MicroF1\": 0.8367246874233881, \"MacroF1\": 0.8249418657104467, \"Memory in Mb\": 1.0523834228515625, \"Time in s\": 8.617788000000001 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8203699576554491, \"MicroF1\": 0.8203699576554491, \"MacroF1\": 0.8300896799820437, \"Memory in Mb\": 1.147721290588379, \"Time in s\": 10.679552 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8192032686414709, \"MicroF1\": 0.8192032686414709, \"MacroF1\": 
0.8269731591910484, \"Memory in Mb\": 1.243059158325195, \"Time in s\": 13.032163 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8172732415613804, \"MicroF1\": 0.8172732415613804, \"MacroF1\": 0.8027823390848743, \"Memory in Mb\": 1.3383970260620115, \"Time in s\": 15.695238 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7961828051129399, \"MicroF1\": 0.7961828051129399, \"MacroF1\": 0.8002006091139847, \"Memory in Mb\": 1.433734893798828, \"Time in s\": 18.689224 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.793920575257395, \"MicroF1\": 0.793920575257395, \"MacroF1\": 0.7746960355921346, \"Memory in Mb\": 1.5290727615356443, \"Time in s\": 22.034543 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7688064960931515, \"MicroF1\": 0.7688064960931515, \"MacroF1\": 0.7622487598340326, \"Memory in Mb\": 1.624410629272461, \"Time in s\": 25.755146 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7568853640951694, \"MicroF1\": 0.7568853640951694, \"MacroF1\": 0.757813781660983, \"Memory in Mb\": 1.7197484970092771, \"Time in s\": 29.876127 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7669889690862045, \"MicroF1\": 0.7669889690862046, \"MacroF1\": 0.7643943615019535, \"Memory in Mb\": 1.8150863647460935, \"Time in s\": 34.413227 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7676428847890595, \"MicroF1\": 0.7676428847890595, \"MacroF1\": 0.7655695901071293, \"Memory in Mb\": 1.9104242324829104, \"Time in s\": 39.374485 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7714180659394534, \"MicroF1\": 0.7714180659394533, \"MacroF1\": 0.7672011803374248, \"Memory in Mb\": 2.0057621002197266, \"Time in s\": 44.773425 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7702813120112058, \"MicroF1\": 0.7702813120112058, \"MacroF1\": 0.7699263138193526, \"Memory in Mb\": 2.1021223068237305, \"Time in s\": 50.625164000000005 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7680222841225627, \"MicroF1\": 0.7680222841225627, \"MacroF1\": 0.7682287234686137, \"Memory in Mb\": 2.197460174560547, \"Time in s\": 56.940867 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7659597143770649, \"MicroF1\": 0.7659597143770649, \"MacroF1\": 0.7643546547243015, \"Memory in Mb\": 2.2927980422973637, \"Time in s\": 63.725868000000006 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7586559084873864, \"MicroF1\": 0.7586559084873864, \"MacroF1\": 0.7552148692020618, \"Memory in Mb\": 2.38813591003418, \"Time in s\": 70.991963 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": 
\"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7505637807628199, \"MicroF1\": 0.7505637807628199, \"MacroF1\": 0.7430512224080149, \"Memory in Mb\": 2.483473777770996, \"Time in s\": 78.748505 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7290468558499105, \"MicroF1\": 0.7290468558499106, \"MacroF1\": 0.715756093271779, \"Memory in Mb\": 2.5788116455078125, \"Time in s\": 87.01168299999999 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7217430776214253, \"MicroF1\": 0.7217430776214253, \"MacroF1\": 0.7173640789896896, \"Memory in Mb\": 2.674149513244629, \"Time in s\": 95.787317 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7151361288628206, \"MicroF1\": 0.7151361288628206, \"MacroF1\": 0.7011862635194492, \"Memory in Mb\": 2.7694873809814453, \"Time in s\": 105.08400199999998 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.705603921900093, \"MicroF1\": 0.705603921900093, \"MacroF1\": 0.6976881379682605, \"Memory in Mb\": 2.8648252487182617, \"Time in s\": 114.917299 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7094533867146009, \"MicroF1\": 0.7094533867146009, \"MacroF1\": 0.705840538940343, \"Memory in Mb\": 2.960163116455078, \"Time in s\": 125.31674099999998 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7053846762077963, \"MicroF1\": 0.7053846762077963, \"MacroF1\": 0.6965736948063981, \"Memory in Mb\": 3.0555009841918945, \"Time in s\": 136.28361299999997 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6927613941018766, \"MicroF1\": 0.6927613941018766, \"MacroF1\": 0.6842255816736497, \"Memory in Mb\": 3.150838851928711, \"Time in s\": 147.832836 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6890737577063062, \"MicroF1\": 0.6890737577063062, \"MacroF1\": 0.6845669389392289, \"Memory in Mb\": 3.246176719665528, \"Time in s\": 159.980064 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6873332852714296, \"MicroF1\": 0.6873332852714296, \"MacroF1\": 0.6839054551822702, \"Memory in Mb\": 3.341514587402344, \"Time in s\": 172.74228 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.682960991666083, \"MicroF1\": 0.682960991666083, \"MacroF1\": 0.6781566371919946, \"Memory in Mb\": 3.43685245513916, \"Time in s\": 186.135321 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.686185061619119, \"MicroF1\": 0.686185061619119, \"MacroF1\": 0.6843713776162116, \"Memory in Mb\": 3.532190322875977, \"Time in s\": 200.177651 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6928784365684001, \"MicroF1\": 0.6928784365684001, \"MacroF1\": 
0.6911392400672977, \"Memory in Mb\": 3.627528190612793, \"Time in s\": 214.888654 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6913500612784622, \"MicroF1\": 0.6913500612784622, \"MacroF1\": 0.687359772989117, \"Memory in Mb\": 3.72286605834961, \"Time in s\": 230.279565 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6819810194205267, \"MicroF1\": 0.6819810194205267, \"MacroF1\": 0.6749159449359359, \"Memory in Mb\": 3.818203926086426, \"Time in s\": 246.365674 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6726515105092223, \"MicroF1\": 0.6726515105092223, \"MacroF1\": 0.6670192172011686, \"Memory in Mb\": 3.913541793823242, \"Time in s\": 263.163212 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6695163508100676, \"MicroF1\": 0.6695163508100676, \"MacroF1\": 0.6664051037977978, \"Memory in Mb\": 4.008879661560059, \"Time in s\": 280.687557 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6650131310183834, \"MicroF1\": 0.6650131310183834, \"MacroF1\": 0.6608988619616459, \"Memory in Mb\": 4.1063079833984375, \"Time in s\": 298.952273 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6568431853160804, \"MicroF1\": 0.6568431853160804, \"MacroF1\": 0.653138289771919, \"Memory in Mb\": 4.201645851135254, \"Time in s\": 317.97399 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6556180714166342, \"MicroF1\": 0.6556180714166342, \"MacroF1\": 0.6538448358590967, \"Memory in Mb\": 4.29698371887207, \"Time in s\": 337.769402 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6614194672912468, \"MicroF1\": 0.6614194672912468, \"MacroF1\": 0.6603186829199905, \"Memory in Mb\": 4.392321586608887, \"Time in s\": 358.361854 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6669686151222891, \"MicroF1\": 0.6669686151222891, \"MacroF1\": 0.666229361655457, \"Memory in Mb\": 4.487659454345703, \"Time in s\": 379.763277 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6579921773142112, \"MicroF1\": 0.6579921773142112, \"MacroF1\": 0.6554177118629491, \"Memory in Mb\": 4.58299732208252, \"Time in s\": 401.986094 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6622580809886126, \"MicroF1\": 0.6622580809886126, \"MacroF1\": 0.6609360990360077, \"Memory in Mb\": 4.678335189819336, \"Time in s\": 425.04707400000007 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6562453103896754, \"MicroF1\": 0.6562453103896754, \"MacroF1\": 0.6545704957554573, \"Memory in Mb\": 4.773673057556152, \"Time in s\": 448.962927 }, { \"step\": 20400, \"track\": \"Multiclass classification\", 
\"model\": \"Naive Bayes\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6525319868621011, \"MicroF1\": 0.6525319868621011, \"MacroF1\": 0.6515767870317881, \"Memory in Mb\": 4.869010925292969, \"Time in s\": 473.747426 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3555555555555555, \"MicroF1\": 0.3555555555555555, \"MacroF1\": 0.2537942449707155, \"Memory in Mb\": 0.4191083908081054, \"Time in s\": 0.164966 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4945054945054945, \"MicroF1\": 0.4945054945054945, \"MacroF1\": 0.5043329927491419, \"Memory in Mb\": 0.4191045761108398, \"Time in s\": 0.355643 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5564033878668025, \"Memory in Mb\": 0.4191999435424804, \"Time in s\": 0.571134 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6010928961748634, \"MicroF1\": 0.6010928961748634, \"MacroF1\": 0.6227664965396451, \"Memory in Mb\": 0.4191999435424804, \"Time in s\": 0.8113900000000001 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6375545851528385, \"MicroF1\": 0.6375545851528385, \"MacroF1\": 0.6539827168809461, \"Memory in Mb\": 0.4192228317260742, \"Time in s\": 1.079154 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6509090909090909, \"MicroF1\": 0.6509090909090909, \"MacroF1\": 0.6671561759164943, \"Memory in Mb\": 0.4192724227905273, \"Time in s\": 1.371943 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.67601246105919, \"MicroF1\": 0.67601246105919, \"MacroF1\": 0.6756614325426025, \"Memory in Mb\": 0.4192724227905273, \"Time in s\": 1.689575 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7029972752043597, \"MicroF1\": 0.7029972752043597, \"MacroF1\": 0.6993447851636565, \"Memory in Mb\": 0.4192457199096679, \"Time in s\": 2.032058 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7142857142857143, \"MicroF1\": 0.7142857142857143, \"MacroF1\": 0.7108606838045498, \"Memory in Mb\": 0.4191656112670898, \"Time in s\": 2.4019660000000003 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 0.7145969498910676, \"MacroF1\": 0.7090365931960759, \"Memory in Mb\": 0.4192419052124023, \"Time in s\": 2.796914 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7207920792079208, \"MicroF1\": 0.7207920792079208, \"MacroF1\": 0.7126631585949763, \"Memory in Mb\": 0.4192419052124023, \"Time in s\": 3.216844 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7223230490018149, 
\"MicroF1\": 0.7223230490018149, \"MacroF1\": 0.7157730164623107, \"Memory in Mb\": 0.4191350936889648, \"Time in s\": 3.6616 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286432160804021, \"MicroF1\": 0.7286432160804021, \"MacroF1\": 0.7216745323124732, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 4.131175 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7278382581648523, \"MicroF1\": 0.7278382581648523, \"MacroF1\": 0.72291051830875, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 4.628008 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314949201741655, \"MicroF1\": 0.7314949201741654, \"MacroF1\": 0.7263583447448078, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 5.149870999999999 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7333333333333333, \"MicroF1\": 0.7333333333333333, \"MacroF1\": 0.729431071218305, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 5.696603 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387964148527529, \"MicroF1\": 0.7387964148527529, \"MacroF1\": 0.7349287389986899, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 6.268242 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7376058041112454, \"MicroF1\": 0.7376058041112454, \"MacroF1\": 0.7356226390109742, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 6.867156 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7445589919816724, \"MicroF1\": 0.7445589919816724, \"MacroF1\": 0.7409366047432264, \"Memory in Mb\": 0.4191579818725586, \"Time in s\": 7.49107 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7453754080522307, \"MicroF1\": 0.7453754080522307, \"MacroF1\": 0.7408438328939173, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 8.139827 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7471502590673575, \"MicroF1\": 0.7471502590673575, \"MacroF1\": 0.7416651838589269, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 8.813418 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7467853610286844, \"MicroF1\": 0.7467853610286844, \"MacroF1\": 0.7416356251822, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 9.514287 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7492904446546831, \"MicroF1\": 0.7492904446546831, \"MacroF1\": 0.7430778844390782, \"Memory in Mb\": 0.4191312789916992, \"Time in s\": 10.240045 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7515865820489573, \"MicroF1\": 0.7515865820489573, \"MacroF1\": 0.7451256886686588, \"Memory in Mb\": 0.4191808700561523, 
\"Time in s\": 10.990683 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7536988685813751, \"MicroF1\": 0.7536988685813751, \"MacroF1\": 0.7468312166689606, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 11.766057 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7564853556485356, \"MicroF1\": 0.7564853556485356, \"MacroF1\": 0.7503479321738039, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 12.566171 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7566478646253022, \"MicroF1\": 0.7566478646253022, \"MacroF1\": 0.7509717522131719, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 13.393734 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7614607614607615, \"MicroF1\": 0.7614607614607615, \"MacroF1\": 0.7547643483779538, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 14.246394 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7614403600900225, \"MicroF1\": 0.7614403600900225, \"MacroF1\": 0.7551060921605869, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 15.123846 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621464829586657, \"MicroF1\": 0.7621464829586658, \"MacroF1\": 0.7562209880685911, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 16.026049 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7642105263157895, \"MicroF1\": 0.7642105263157895, \"MacroF1\": 0.7575332274919562, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 16.955566 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7688647178789939, \"MicroF1\": 0.768864717878994, \"MacroF1\": 0.760438686053582, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 17.910383 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7705998681608438, \"MicroF1\": 0.7705998681608438, \"MacroF1\": 0.7612069012840875, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 18.890183 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709532949456174, \"MicroF1\": 0.7709532949456174, \"MacroF1\": 0.7622701654854867, \"Memory in Mb\": 0.4191808700561523, \"Time in s\": 19.895086 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7712865133623369, \"MicroF1\": 0.771286513362337, \"MacroF1\": 0.7617247271717752, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 20.927569 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709969788519637, \"MicroF1\": 0.7709969788519637, \"MacroF1\": 0.7615629120572474, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 21.985292 }, { \"step\": 1702, \"track\": \"Multiclass classification\", 
\"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.770135214579659, \"MicroF1\": 0.770135214579659, \"MacroF1\": 0.7627316365695141, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 23.068121 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7727532913566113, \"MicroF1\": 0.7727532913566113, \"MacroF1\": 0.7649467707214076, \"Memory in Mb\": 0.4192037582397461, \"Time in s\": 24.176005 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7741215839375348, \"MicroF1\": 0.7741215839375348, \"MacroF1\": 0.7649332326562147, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 25.309107999999995 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7754214246873301, \"MicroF1\": 0.7754214246873301, \"MacroF1\": 0.7664700790631906, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 26.470049 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7740053050397878, \"MicroF1\": 0.7740053050397878, \"MacroF1\": 0.7655121135276625, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 27.656614999999995 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7742102537545313, \"MicroF1\": 0.7742102537545313, \"MacroF1\": 0.7648034036287765, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 28.868293999999995 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7754172989377845, \"MicroF1\": 0.7754172989377845, \"MacroF1\": 0.7656013068970458, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 30.105038 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7770637666831438, \"MicroF1\": 0.7770637666831438, \"MacroF1\": 0.7660878232247856, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 31.36953 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762203963267279, \"MicroF1\": 0.7762203963267279, \"MacroF1\": 0.7654829214385931, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 32.658967 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7768321513002364, \"MicroF1\": 0.7768321513002364, \"MacroF1\": 0.7653071619305024, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 33.973288999999994 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7778806108283203, \"MicroF1\": 0.7778806108283203, \"MacroF1\": 0.7659351904174981, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 35.312507 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7797915722700498, \"MicroF1\": 0.7797915722700498, \"MacroF1\": 0.7668192864082087, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 36.679284 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7767421216156236, \"MicroF1\": 0.7767421216156236, \"MacroF1\": 0.7637794374955548, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 38.070969 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7759895606785558, \"MicroF1\": 0.7759895606785558, \"MacroF1\": 0.763026662835187, \"Memory in Mb\": 0.4191770553588867, \"Time in s\": 39.487872 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6218009478672986, \"MicroF1\": 0.6218009478672986, \"MacroF1\": 0.585266310719421, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 1.016292 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6153481762198011, \"MicroF1\": 0.6153481762198011, \"MacroF1\": 0.5806436317780949, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 2.820671 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071992421850332, \"MicroF1\": 0.6071992421850332, \"MacroF1\": 0.572248584718361, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 5.468586999999999 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043097324177126, \"MicroF1\": 0.6043097324177126, \"MacroF1\": 0.5697573109597247, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 8.970813999999999 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6088274294373934, \"MicroF1\": 0.6088274294373934, \"MacroF1\": 0.5727379077413696, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 13.304299 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6026835043409629, \"MicroF1\": 0.6026835043409629, \"MacroF1\": 0.568251333238805, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 18.451533 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.600189419564335, \"MicroF1\": 0.600189419564335, \"MacroF1\": 0.5659762112716077, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 24.373815 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.60258079791642, \"MicroF1\": 0.60258079791642, \"MacroF1\": 0.5679781484640409, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 31.061276 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5998105861306956, \"MicroF1\": 0.5998105861306956, \"MacroF1\": 0.5649597336877693, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 38.490335 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5998674116867128, \"MicroF1\": 0.5998674116867128, \"MacroF1\": 0.5650173260529011, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 46.63726 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5974171330176495, \"MicroF1\": 0.5974171330176495, \"MacroF1\": 0.5633067089377386, \"Memory in Mb\": 
0.6617898941040039, \"Time in s\": 55.514266 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6001894088864336, \"MicroF1\": 0.6001894088864336, \"MacroF1\": 0.5684760329567131, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 65.102691 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120783856632913, \"MicroF1\": 0.6120783856632913, \"MacroF1\": 0.5935956771555828, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 75.408233 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6024487587093282, \"MicroF1\": 0.6024487587093282, \"MacroF1\": 0.5842148300149193, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 86.426133 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5677757434181451, \"MicroF1\": 0.5677757434181451, \"MacroF1\": 0.5509250187877572, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 98.158455 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5419354838709678, \"MicroF1\": 0.5419354838709678, \"MacroF1\": 0.5257359157219257, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 110.605121 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5233691716338923, \"MicroF1\": 0.5233691716338923, \"MacroF1\": 0.506858183835206, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 123.763106 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5121271110643447, \"MicroF1\": 0.5121271110643447, \"MacroF1\": 0.4929289906509415, \"Memory in Mb\": 0.6617898941040039, \"Time in s\": 137.636213 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5120370831879579, \"MicroF1\": 0.5120370831879579, \"MacroF1\": 0.4920970323041603, \"Memory in Mb\": 1.317840576171875, \"Time in s\": 152.19804 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5173066906577016, \"MicroF1\": 0.5173066906577016, \"MacroF1\": 0.497344716983625, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 167.375493 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5229312288613304, \"MicroF1\": 0.5229312288613304, \"MacroF1\": 0.5026343687424488, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 183.138263 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5301536739701261, \"MicroF1\": 0.5301536739701261, \"MacroF1\": 0.5095132087733324, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 199.493958 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5351422571746202, \"MicroF1\": 0.5351422571746202, \"MacroF1\": 0.5135975374357353, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 216.435818 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.5403069881229531, \"MicroF1\": 0.5403069881229531, \"MacroF1\": 0.5180803411538233, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 233.973059 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5441493995984696, \"MicroF1\": 0.5441493995984696, \"MacroF1\": 0.5209012984387186, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 252.0993 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5475869604807867, \"MicroF1\": 0.5475869604807867, \"MacroF1\": 0.5230407124785976, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 270.826115 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5442460804601733, \"MicroF1\": 0.5442460804601733, \"MacroF1\": 0.5199893698637053, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 290.125735 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5439848479724017, \"MicroF1\": 0.5439848479724017, \"MacroF1\": 0.5225387960194382, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 310.131151 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5449825294713124, \"MicroF1\": 0.5449825294713124, \"MacroF1\": 0.5260472440529832, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 330.869455 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5469238296663405, \"MicroF1\": 0.5469238296663405, \"MacroF1\": 0.5300194392617626, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 352.339648 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5492286543455017, \"MicroF1\": 0.5492286543455017, \"MacroF1\": 0.5337692045397759, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 374.544388 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5448196265277737, \"MicroF1\": 0.5448196265277737, \"MacroF1\": 0.5298516474077152, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 397.480297 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.539357763939507, \"MicroF1\": 0.539357763939507, \"MacroF1\": 0.5246413689313029, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 421.148709 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5352756037099964, \"MicroF1\": 0.5352756037099964, \"MacroF1\": 0.5204658240271912, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 445.552724 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5307232338537298, \"MicroF1\": 0.5307232338537298, \"MacroF1\": 0.5158458403074863, \"Memory in Mb\": 1.3185958862304688, \"Time in s\": 470.685377 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5287912666052874, \"MicroF1\": 0.5287912666052874, \"MacroF1\": 0.5138605376143625, \"Memory in 
Mb\": 1.8598642349243164, \"Time in s\": 496.544653 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5245322617798367, \"MicroF1\": 0.5245322617798367, \"MacroF1\": 0.5100329616180462, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 523.1337460000001 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5244847608841927, \"MicroF1\": 0.5244847608841927, \"MacroF1\": 0.5114466799524962, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 550.3794180000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5269650098341548, \"MicroF1\": 0.5269650098341548, \"MacroF1\": 0.5145630920489553, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 578.1701970000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5290608205686688, \"MicroF1\": 0.5290608205686688, \"MacroF1\": 0.5171452370879218, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 606.4941780000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5316318281556762, \"MicroF1\": 0.5316318281556762, \"MacroF1\": 0.5200714653059242, \"Memory in Mb\": 1.9744834899902344, \"Time in s\": 635.3594070000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5332912448422809, \"MicroF1\": 0.5332912448422809, \"MacroF1\": 0.521951703681177, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 664.7773900000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5350937080185875, \"MicroF1\": 0.5350937080185875, \"MacroF1\": 0.5236272112757866, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 694.7425150000001 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5374168693368917, \"MicroF1\": 0.5374168693368917, \"MacroF1\": 0.5257977177437826, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 725.2648820000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5359540394368568, \"MicroF1\": 0.5359540394368568, \"MacroF1\": 0.5247049329892776, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 756.3925470000001 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5333196088522902, \"MicroF1\": 0.5333196088522902, \"MacroF1\": 0.5224640186909638, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 788.1537450000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5314017448771937, \"MicroF1\": 0.5314017448771937, \"MacroF1\": 0.5209076603734538, \"Memory in Mb\": 1.975238800048828, \"Time in s\": 820.5431960000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5321877404462683, \"MicroF1\": 0.5321877404462683, \"MacroF1\": 0.5219332135179457, \"Memory in Mb\": 2.097897529602051, \"Time in s\": 853.5752100000002 }, { 
\"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5376959202210927, \"MicroF1\": 0.5376959202210927, \"MacroF1\": 0.5274519689249669, \"Memory in Mb\": 2.335637092590332, \"Time in s\": 887.2128290000002 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5370177465482301, \"MicroF1\": 0.5370177465482301, \"MacroF1\": 0.5270712327692165, \"Memory in Mb\": 2.5391950607299805, \"Time in s\": 921.3507020000002 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.2276544570922851, \"Time in s\": 0.136786 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9423312883435584, \"MicroF1\": 0.9423312883435584, \"MacroF1\": 0.7661667470992702, \"Memory in Mb\": 0.3232784271240234, \"Time in s\": 0.4359419999999999 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8830744071954211, \"MicroF1\": 0.883074407195421, \"MacroF1\": 0.8761191747044462, \"Memory in Mb\": 0.4189023971557617, \"Time in s\": 0.926938 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8902513795217658, \"MicroF1\": 0.8902513795217658, \"MacroF1\": 0.8767853151263398, \"Memory in Mb\": 0.5150146484375, \"Time in s\": 1.637883 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8891613536047082, \"MicroF1\": 0.8891613536047082, \"MacroF1\": 0.8807858055314012, \"Memory in Mb\": 0.6221132278442383, \"Time in s\": 2.570345 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.848385778504291, \"MicroF1\": 0.848385778504291, \"MacroF1\": 0.8522513926518692, \"Memory in Mb\": 0.7177371978759766, \"Time in s\": 3.758757 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8563922942206655, \"MicroF1\": 0.8563922942206655, \"MacroF1\": 0.8440193478447515, \"Memory in Mb\": 0.8133611679077148, \"Time in s\": 5.245901 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8482991112473184, \"MicroF1\": 0.8482991112473184, \"MacroF1\": 0.8269786301577753, \"Memory in Mb\": 0.9089851379394532, \"Time in s\": 7.065474 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8392808499046581, \"MicroF1\": 0.8392808499046581, \"MacroF1\": 0.8374924160046074, \"Memory in Mb\": 1.0046091079711914, \"Time in s\": 9.269679 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8323118411375338, \"MicroF1\": 0.8323118411375338, \"MacroF1\": 0.8182261307945194, \"Memory in Mb\": 1.1253337860107422, \"Time in s\": 11.89727 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.8159126365054602, \"MicroF1\": 0.8159126365054602, \"MacroF1\": 0.8260965842218733, \"Memory in Mb\": 1.2209577560424805, \"Time in s\": 14.983422 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8149131767109296, \"MicroF1\": 0.8149131767109296, \"MacroF1\": 0.8221314665977922, \"Memory in Mb\": 1.3165817260742188, \"Time in s\": 18.566921 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8125589289081652, \"MicroF1\": 0.8125589289081652, \"MacroF1\": 0.797613058026624, \"Memory in Mb\": 1.412205696105957, \"Time in s\": 22.693048 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7907546839432674, \"MicroF1\": 0.7907546839432674, \"MacroF1\": 0.7936708037520237, \"Memory in Mb\": 1.507829666137695, \"Time in s\": 27.396131 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7886909625755842, \"MicroF1\": 0.7886909625755842, \"MacroF1\": 0.7694478218498494, \"Memory in Mb\": 1.6034536361694336, \"Time in s\": 32.715078 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7635973647924008, \"MicroF1\": 0.7635973647924008, \"MacroF1\": 0.75687960152136, \"Memory in Mb\": 1.699077606201172, \"Time in s\": 38.687416 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.75155010814708, \"MicroF1\": 0.7515501081470799, \"MacroF1\": 0.7521509466338958, \"Memory in Mb\": 1.7947015762329102, \"Time in s\": 45.356366 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7611330518861501, \"MicroF1\": 0.7611330518861501, \"MacroF1\": 0.7576671162861804, \"Memory in Mb\": 1.8917903900146484, \"Time in s\": 52.757111 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7617081666881693, \"MicroF1\": 0.7617081666881692, \"MacroF1\": 0.7593340838982118, \"Memory in Mb\": 1.9874143600463867, \"Time in s\": 60.92847 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7655349920333374, \"MicroF1\": 0.7655349920333374, \"MacroF1\": 0.7610505848438686, \"Memory in Mb\": 2.083038330078125, \"Time in s\": 69.910689 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7644449632310026, \"MicroF1\": 0.7644449632310025, \"MacroF1\": 0.7639417799779614, \"Memory in Mb\": 2.226712226867676, \"Time in s\": 79.742469 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7624512534818941, \"MicroF1\": 0.7624512534818941, \"MacroF1\": 0.7625605608371231, \"Memory in Mb\": 2.322336196899414, \"Time in s\": 90.464241 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7605243525524885, \"MicroF1\": 0.7605243525524885, \"MacroF1\": 0.7588384348689571, \"Memory in Mb\": 2.4179601669311523, \"Time 
in s\": 102.115634 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.753344908589521, \"MicroF1\": 0.753344908589521, \"MacroF1\": 0.7499438215834663, \"Memory in Mb\": 2.51358413696289, \"Time in s\": 114.735409 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7450730463770958, \"MicroF1\": 0.7450730463770959, \"MacroF1\": 0.7369660419615973, \"Memory in Mb\": 2.609208106994629, \"Time in s\": 128.375943 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7240501555576506, \"MicroF1\": 0.7240501555576506, \"MacroF1\": 0.7111305646829175, \"Memory in Mb\": 2.704832077026367, \"Time in s\": 143.06648900000002 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7166591012256015, \"MicroF1\": 0.7166591012256015, \"MacroF1\": 0.7122511515574345, \"Memory in Mb\": 2.800456047058105, \"Time in s\": 158.84720500000003 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.710146196270682, \"MicroF1\": 0.710146196270682, \"MacroF1\": 0.6963016796632095, \"Memory in Mb\": 2.896080017089844, \"Time in s\": 175.75710000000004 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7005324993660722, \"MicroF1\": 0.7005324993660722, \"MacroF1\": 0.6925666211338902, \"Memory in Mb\": 2.991703987121582, \"Time in s\": 193.83910100000003 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7043876133671052, \"MicroF1\": 0.7043876133671052, \"MacroF1\": 0.7007845610449206, \"Memory in Mb\": 3.0873279571533203, \"Time in s\": 213.15240600000004 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7004032576895707, \"MicroF1\": 0.7004032576895707, \"MacroF1\": 0.6915775762792659, \"Memory in Mb\": 3.1829519271850586, \"Time in s\": 233.71252100000004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6877058598238223, \"MicroF1\": 0.6877058598238223, \"MacroF1\": 0.6789768292873962, \"Memory in Mb\": 3.278575897216797, \"Time in s\": 255.55614900000003 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6838743222164451, \"MicroF1\": 0.6838743222164451, \"MacroF1\": 0.6791243465680946, \"Memory in Mb\": 3.374199867248535, \"Time in s\": 278.72697300000004 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6822146925239708, \"MicroF1\": 0.6822146925239708, \"MacroF1\": 0.6786558938530484, \"Memory in Mb\": 3.469823837280273, \"Time in s\": 303.265051 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6777085230058127, \"MicroF1\": 0.6777085230058127, \"MacroF1\": 0.6725285130045525, \"Memory in Mb\": 3.565447807312012, \"Time in s\": 329.21132600000004 }, { \"step\": 14688, \"track\": 
\"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6807380676788997, \"MicroF1\": 0.6807380676788997, \"MacroF1\": 0.6786761142186741, \"Memory in Mb\": 3.66107177734375, \"Time in s\": 356.60277 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6873799271281882, \"MicroF1\": 0.6873799271281882, \"MacroF1\": 0.68548393064844, \"Memory in Mb\": 3.756695747375488, \"Time in s\": 385.483239 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6858027478552539, \"MicroF1\": 0.6858027478552539, \"MacroF1\": 0.6816808496509055, \"Memory in Mb\": 3.8523197174072266, \"Time in s\": 415.890234 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6765759537426937, \"MicroF1\": 0.6765759537426937, \"MacroF1\": 0.6694713281964944, \"Memory in Mb\": 3.947943687438965, \"Time in s\": 447.863862 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6673815797536614, \"MicroF1\": 0.6673815797536614, \"MacroF1\": 0.6617321933140904, \"Memory in Mb\": 4.043567657470703, \"Time in s\": 481.440864 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6643151790518323, \"MicroF1\": 0.6643151790518323, \"MacroF1\": 0.6611780293584051, \"Memory in Mb\": 4.139191627502441, \"Time in s\": 516.667277 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6598774438284214, \"MicroF1\": 0.6598774438284214, \"MacroF1\": 0.655734247886306, \"Memory in Mb\": 4.333066940307617, \"Time in s\": 553.571784 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6518269395200365, \"MicroF1\": 0.6518269395200365, \"MacroF1\": 0.6481085155228207, \"Memory in Mb\": 4.428690910339356, \"Time in s\": 592.193317 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6507158375577963, \"MicroF1\": 0.6507158375577963, \"MacroF1\": 0.648936899585426, \"Memory in Mb\": 4.524314880371094, \"Time in s\": 632.57823 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6566806470940683, \"MicroF1\": 0.6566806470940683, \"MacroF1\": 0.6555764711123697, \"Memory in Mb\": 4.619938850402832, \"Time in s\": 674.762883 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.662279533223211, \"MicroF1\": 0.662279533223211, \"MacroF1\": 0.6615432060687811, \"Memory in Mb\": 4.71556282043457, \"Time in s\": 718.781471 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6534028683181226, \"MicroF1\": 0.6534028683181226, \"MacroF1\": 0.6508089832432515, \"Memory in Mb\": 4.811186790466309, \"Time in s\": 764.6790530000001 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6577643874789358, 
\"MicroF1\": 0.6577643874789358, \"MacroF1\": 0.6564201177589184, \"Memory in Mb\": 4.906810760498047, \"Time in s\": 812.4977690000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6518433294982742, \"MicroF1\": 0.6518433294982742, \"MacroF1\": 0.6501496360982538, \"Memory in Mb\": 5.002434730529785, \"Time in s\": 862.2665020000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6482180499044071, \"MicroF1\": 0.6482180499044071, \"MacroF1\": 0.6472493759146579, \"Memory in Mb\": 5.098058700561523, \"Time in s\": 914.036687 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4, \"MicroF1\": 0.4000000000000001, \"MacroF1\": 0.2926704014939309, \"Memory in Mb\": 0.4254798889160156, \"Time in s\": 0.179349 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5274725274725275, \"MicroF1\": 0.5274725274725275, \"MacroF1\": 0.5399541634835753, \"Memory in Mb\": 0.425537109375, \"Time in s\": 0.395098 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5547445255474452, \"MicroF1\": 0.5547445255474452, \"MacroF1\": 0.5795767508697842, \"Memory in Mb\": 0.4256591796875, \"Time in s\": 0.646414 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6398140932417979, \"Memory in Mb\": 0.4257431030273437, \"Time in s\": 0.936097 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6419213973799127, \"MicroF1\": 0.6419213973799127, \"MacroF1\": 0.6592174177506214, \"Memory in Mb\": 0.4257431030273437, \"Time in s\": 1.2615820000000002 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6545454545454545, \"MicroF1\": 0.6545454545454545, \"MacroF1\": 0.6716869228432982, \"Memory in Mb\": 0.4257926940917969, \"Time in s\": 1.6228100000000003 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6791277258566978, \"MicroF1\": 0.6791277258566978, \"MacroF1\": 0.6806263486692059, \"Memory in Mb\": 0.4258537292480469, \"Time in s\": 2.022508 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7029972752043597, \"MicroF1\": 0.7029972752043597, \"MacroF1\": 0.7008299817149242, \"Memory in Mb\": 0.4258270263671875, \"Time in s\": 2.458102 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7142857142857143, \"MicroF1\": 0.7142857142857143, \"MacroF1\": 0.7121569327354127, \"Memory in Mb\": 0.4257469177246094, \"Time in s\": 2.92926 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 
0.7145969498910676, \"MacroF1\": 0.7103106155638, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 3.4385440000000003 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7227722772277227, \"MicroF1\": 0.7227722772277227, \"MacroF1\": 0.715881182832702, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 3.983535 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7241379310344828, \"MicroF1\": 0.7241379310344829, \"MacroF1\": 0.7187949260386588, \"Memory in Mb\": 0.4257164001464844, \"Time in s\": 4.564144000000001 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7286432160804021, \"MicroF1\": 0.7286432160804021, \"MacroF1\": 0.7227601649788371, \"Memory in Mb\": 0.4257392883300781, \"Time in s\": 5.1830560000000006 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7278382581648523, \"MicroF1\": 0.7278382581648523, \"MacroF1\": 0.7240595992457829, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 5.837887 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7314949201741655, \"MicroF1\": 0.7314949201741654, \"MacroF1\": 0.727547508877315, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 6.528431 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7333333333333333, \"MicroF1\": 0.7333333333333333, \"MacroF1\": 0.730585229165138, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 7.25733 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387964148527529, \"MicroF1\": 0.7387964148527529, \"MacroF1\": 0.7359626710287273, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 8.022590000000001 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7376058041112454, \"MicroF1\": 0.7376058041112454, \"MacroF1\": 0.7367699509780541, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 8.823569 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7434135166093929, \"MicroF1\": 0.7434135166093929, \"MacroF1\": 0.7406779161411566, \"Memory in Mb\": 0.4258003234863281, \"Time in s\": 9.663167 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431991294885746, \"MicroF1\": 0.7431991294885745, \"MacroF1\": 0.7396284921253597, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 10.538696000000002 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7430051813471502, \"MicroF1\": 0.7430051813471502, \"MacroF1\": 0.7386475429248082, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 11.449986 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.7428288822947576, \"MicroF1\": 0.7428288822947575, \"MacroF1\": 0.7387392151852316, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 12.399906 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7445600756859035, \"MicroF1\": 0.7445600756859035, \"MacroF1\": 0.7397141356071754, \"Memory in Mb\": 0.4257736206054687, \"Time in s\": 13.385988 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7470534904805077, \"MicroF1\": 0.7470534904805077, \"MacroF1\": 0.7419829508197956, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 14.408007 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7484769364664926, \"MicroF1\": 0.7484769364664926, \"MacroF1\": 0.7430153502407321, \"Memory in Mb\": 0.4258232116699219, \"Time in s\": 15.46854 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7514644351464436, \"MicroF1\": 0.7514644351464436, \"MacroF1\": 0.7466450927602833, \"Memory in Mb\": 0.4254570007324219, \"Time in s\": 16.565517 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7518130539887188, \"MicroF1\": 0.7518130539887188, \"MacroF1\": 0.7475811251410989, \"Memory in Mb\": 0.4255790710449219, \"Time in s\": 17.698596 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7567987567987567, \"MicroF1\": 0.7567987567987567, \"MacroF1\": 0.7515585748403605, \"Memory in Mb\": 0.4256401062011719, \"Time in s\": 18.868108 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7576894223555889, \"MicroF1\": 0.7576894223555888, \"MacroF1\": 0.7527145732365901, \"Memory in Mb\": 0.4256401062011719, \"Time in s\": 20.076794 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7592458303118201, \"MicroF1\": 0.7592458303118201, \"MacroF1\": 0.754880899709855, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 21.321463 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621052631578947, \"MicroF1\": 0.7621052631578947, \"MacroF1\": 0.7572480123106181, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 22.601949 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7661454792658056, \"MicroF1\": 0.7661454792658056, \"MacroF1\": 0.7596240117389202, \"Memory in Mb\": 0.4257011413574219, \"Time in s\": 23.921025 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7679630850362558, \"MicroF1\": 0.7679630850362558, \"MacroF1\": 0.7604664202984912, \"Memory in Mb\": 0.4257621765136719, \"Time in s\": 25.275945 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7683941138835573, \"MicroF1\": 0.7683941138835573, \"MacroF1\": 0.7616623934037686, \"Memory in Mb\": 0.4257621765136719, \"Time in s\": 26.66678 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7681789931634556, \"MicroF1\": 0.7681789931634556, \"MacroF1\": 0.7606779105029744, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 28.096857 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7685800604229607, \"MicroF1\": 0.7685800604229607, \"MacroF1\": 0.7611818346958917, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 29.563118 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7683715461493239, \"MicroF1\": 0.768371546149324, \"MacroF1\": 0.7630805397579306, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 31.065673 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7716084716657127, \"MicroF1\": 0.7716084716657126, \"MacroF1\": 0.7661058855209445, \"Memory in Mb\": 0.4257850646972656, \"Time in s\": 32.607308 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7730061349693251, \"MicroF1\": 0.7730061349693251, \"MacroF1\": 0.76613283717613, \"Memory in Mb\": 0.4257583618164062, \"Time in s\": 34.185135 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7743338771071234, \"MicroF1\": 0.7743338771071234, \"MacroF1\": 0.7676486165305356, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 35.798944000000006 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7729442970822281, \"MicroF1\": 0.7729442970822282, \"MacroF1\": 0.7669643117326908, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 37.451807 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7736923873640601, \"MicroF1\": 0.7736923873640601, \"MacroF1\": 0.7669808567090198, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 39.140782 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7744056651492159, \"MicroF1\": 0.7744056651492159, \"MacroF1\": 0.7669005381948409, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 40.865953000000005 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7755808205635195, \"MicroF1\": 0.7755808205635196, \"MacroF1\": 0.7665616644775576, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 42.627552 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752537457709038, \"MicroF1\": 0.7752537457709039, \"MacroF1\": 0.7663566554091733, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 44.428542 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": 
\"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.775886524822695, \"MicroF1\": 0.775886524822695, \"MacroF1\": 0.7661827507972012, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 46.266451 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7764923646459972, \"MicroF1\": 0.7764923646459972, \"MacroF1\": 0.7663510353808046, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 48.14124 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7784322609877662, \"MicroF1\": 0.7784322609877662, \"MacroF1\": 0.767276937076619, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 50.054953000000005 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.775410563692854, \"MicroF1\": 0.775410563692854, \"MacroF1\": 0.7642399015136985, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 52.004875000000006 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7746846454980426, \"MicroF1\": 0.7746846454980426, \"MacroF1\": 0.7634961218545901, \"Memory in Mb\": 0.4258193969726562, \"Time in s\": 53.99744400000001 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6161137440758294, \"MicroF1\": 0.6161137440758294, \"MacroF1\": 0.5813841513331479, \"Memory in Mb\": 0.6684322357177734, \"Time in s\": 1.380603 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6120322122216959, \"MicroF1\": 0.6120322122216959, \"MacroF1\": 0.5792161554760864, \"Memory in Mb\": 0.6684932708740234, \"Time in s\": 3.972211 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6049889485317335, \"MicroF1\": 0.6049889485317335, \"MacroF1\": 0.5721633809277146, \"Memory in Mb\": 0.6685543060302734, \"Time in s\": 7.807885 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.603125739995264, \"MicroF1\": 0.603125739995264, \"MacroF1\": 0.5703574432462962, \"Memory in Mb\": 0.6685543060302734, \"Time in s\": 12.856232 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6061754120098504, \"MicroF1\": 0.6061754120098504, \"MacroF1\": 0.5722430970062696, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 19.046562 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995264404104184, \"MicroF1\": 0.5995264404104184, \"MacroF1\": 0.5671511237518186, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 26.345703 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5972128264104992, \"MicroF1\": 0.5972128264104992, \"MacroF1\": 0.5650210504998666, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 34.702495 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": 
\"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5989108559251806, \"MicroF1\": 0.5989108559251806, \"MacroF1\": 0.566418690076869, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 44.110114 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5962327685993897, \"MicroF1\": 0.5962327685993897, \"MacroF1\": 0.5633780031885508, \"Memory in Mb\": 0.6686153411865234, \"Time in s\": 54.569631 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5964579979164694, \"MicroF1\": 0.5964579979164694, \"MacroF1\": 0.5634236596216465, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 66.07704199999999 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.594317692638829, \"MicroF1\": 0.594317692638829, \"MacroF1\": 0.5620068495149612, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 78.631408 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5975061163286244, \"MicroF1\": 0.5975061163286244, \"MacroF1\": 0.567518061449456, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 92.232933 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6097472135207984, \"MicroF1\": 0.6097472135207984, \"MacroF1\": 0.5927729676671933, \"Memory in Mb\": 0.6686763763427734, \"Time in s\": 106.889718 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6001488195900697, \"MicroF1\": 0.6001488195900697, \"MacroF1\": 0.5832911478837771, \"Memory in Mb\": 0.6683712005615234, \"Time in s\": 122.594966 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5673969316244712, \"MicroF1\": 0.5673969316244712, \"MacroF1\": 0.5522471754341497, \"Memory in Mb\": 0.8954944610595703, \"Time in s\": 139.36423 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5712340929269014, \"MicroF1\": 0.5712340929269014, \"MacroF1\": 0.559038323684958, \"Memory in Mb\": 1.4438505172729492, \"Time in s\": 157.284858 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5741184335134533, \"MicroF1\": 0.5741184335134533, \"MacroF1\": 0.5632919959429029, \"Memory in Mb\": 1.874833106994629, \"Time in s\": 176.64490099999998 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5867312042931552, \"MicroF1\": 0.5867312042931552, \"MacroF1\": 0.5723846445183199, \"Memory in Mb\": 0.4898128509521484, \"Time in s\": 197.018382 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5939789662562927, \"MicroF1\": 0.5939789662562927, \"MacroF1\": 0.5773993022741072, \"Memory in Mb\": 0.6687717437744141, \"Time in s\": 218.485499 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive 
Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.595908897201572, \"MicroF1\": 0.595908897201572, \"MacroF1\": 0.5788762098776178, \"Memory in Mb\": 0.6688938140869141, \"Time in s\": 241.01085 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5977452085682075, \"MicroF1\": 0.5977452085682075, \"MacroF1\": 0.5801804614049403, \"Memory in Mb\": 1.2152299880981443, \"Time in s\": 264.603623 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5997158968619517, \"MicroF1\": 0.5997158968619517, \"MacroF1\": 0.5818597835760811, \"Memory in Mb\": 1.3294572830200195, \"Time in s\": 289.451742 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6025033968789888, \"MicroF1\": 0.6025033968789888, \"MacroF1\": 0.5841484049015139, \"Memory in Mb\": 1.3295183181762695, \"Time in s\": 315.702926 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6047823856686264, \"MicroF1\": 0.6047823856686264, \"MacroF1\": 0.5859943093850892, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 343.366363 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6074472517898405, \"MicroF1\": 0.6074472517898405, \"MacroF1\": 0.5878557237787366, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 372.430483 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6086323074121289, \"MicroF1\": 0.6086323074121289, \"MacroF1\": 0.5880340775890752, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 402.902886 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6087124267826453, \"MicroF1\": 0.6087124267826453, \"MacroF1\": 0.5895354690395743, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 434.780231 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6080765718537559, \"MicroF1\": 0.6080765718537559, \"MacroF1\": 0.5920130278134075, \"Memory in Mb\": 1.3298234939575195, \"Time in s\": 468.066846 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071253632890311, \"MicroF1\": 0.6071253632890311, \"MacroF1\": 0.5937369304389161, \"Memory in Mb\": 1.3293352127075195, \"Time in s\": 502.755803 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6071845702200196, \"MicroF1\": 0.6071845702200196, \"MacroF1\": 0.5960066132315273, \"Memory in Mb\": 1.3295793533325195, \"Time in s\": 538.8589619999999 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6079425691156255, \"MicroF1\": 0.6079425691156255, \"MacroF1\": 0.59836863034629, \"Memory in Mb\": 1.3296403884887695, \"Time in s\": 576.3667869999999 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6027936432777958, \"MicroF1\": 0.6027936432777958, \"MacroF1\": 0.5936321389881086, \"Memory in Mb\": 0.6688251495361328, \"Time in s\": 615.5159199999999 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6018882543690992, \"MicroF1\": 0.6018882543690992, \"MacroF1\": 0.5927698243358274, \"Memory in Mb\": 0.6689472198486328, \"Time in s\": 655.7217059999999 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.601398211848592, \"MicroF1\": 0.601398211848592, \"MacroF1\": 0.592182344393812, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 696.988077 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5999080061689981, \"MicroF1\": 0.5999080061689981, \"MacroF1\": 0.5906275041314122, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 739.3140189999999 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5996054189135868, \"MicroF1\": 0.5996054189135868, \"MacroF1\": 0.5899615119365567, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 782.70332 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5989608661155332, \"MicroF1\": 0.5989608661155332, \"MacroF1\": 0.5889868403975307, \"Memory in Mb\": 0.6687030792236328, \"Time in s\": 827.150323 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5947865526951928, \"MicroF1\": 0.5947865526951928, \"MacroF1\": 0.5855600636799734, \"Memory in Mb\": 0.6687030792236328, \"Time in s\": 872.66104 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5926717334822621, \"MicroF1\": 0.5926717334822621, \"MacroF1\": 0.5840930914391779, \"Memory in Mb\": 0.6688861846923828, \"Time in s\": 919.237173 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5913018774118706, \"MicroF1\": 0.5913018774118706, \"MacroF1\": 0.5832685369240246, \"Memory in Mb\": 0.6689472198486328, \"Time in s\": 966.874788 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5898833583554683, \"MicroF1\": 0.5898833583554683, \"MacroF1\": 0.5823904732646675, \"Memory in Mb\": 0.6690082550048828, \"Time in s\": 1015.577362 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5883745575071588, \"MicroF1\": 0.5883745575071588, \"MacroF1\": 0.5813207633940128, \"Memory in Mb\": 1.112539291381836, \"Time in s\": 1065.314826 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5863853590856035, \"MicroF1\": 0.5863853590856035, \"MacroF1\": 0.5797569747943008, \"Memory in Mb\": 1.3286066055297852, \"Time in s\": 1116.269805 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.5850461657663086, \"MicroF1\": 0.5850461657663086, \"MacroF1\": 0.5780695197887614, \"Memory in Mb\": 1.3287897109985352, \"Time in s\": 1168.116792 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5867968602032871, \"MicroF1\": 0.5867968602032871, \"MacroF1\": 0.5799343284152632, \"Memory in Mb\": 1.328934669494629, \"Time in s\": 1221.156775 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5917035512094699, \"MicroF1\": 0.5917035512094699, \"MacroF1\": 0.5847625919047718, \"Memory in Mb\": 1.329483985900879, \"Time in s\": 1275.6038119999998 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.5968447139892405, \"MicroF1\": 0.5968447139892405, \"MacroF1\": 0.5895877351185161, \"Memory in Mb\": 1.329422950744629, \"Time in s\": 1331.456239 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.601673012804072, \"MicroF1\": 0.601673012804072, \"MacroF1\": 0.5939045014873635, \"Memory in Mb\": 1.329606056213379, \"Time in s\": 1388.7152379999998 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6067487389598593, \"MicroF1\": 0.6067487389598593, \"MacroF1\": 0.5983547975185618, \"Memory in Mb\": 1.329606056213379, \"Time in s\": 1447.387746 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Insects\", \"Accuracy\": 0.6119623477717381, \"MicroF1\": 0.6119623477717381, \"MacroF1\": 0.6029934068442723, \"Memory in Mb\": 0.147679328918457, \"Time in s\": 1507.071307 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.2342357635498047, \"Time in s\": 0.174096 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.943558282208589, \"MicroF1\": 0.943558282208589, \"MacroF1\": 0.7669956277713079, \"Memory in Mb\": 0.3298597335815429, \"Time in s\": 0.617741 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8863450531479967, \"MicroF1\": 0.8863450531479967, \"MacroF1\": 0.8786592421362931, \"Memory in Mb\": 0.4254837036132812, \"Time in s\": 1.425386 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.891477621091355, \"MicroF1\": 0.891477621091355, \"MacroF1\": 0.8818548670971932, \"Memory in Mb\": 0.5215349197387695, \"Time in s\": 2.789107 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.889651790093183, \"MicroF1\": 0.889651790093183, \"MacroF1\": 0.8812768038030504, \"Memory in Mb\": 0.6287555694580078, \"Time in s\": 4.864698000000001 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.8414384961176952, \"MicroF1\": 0.8414384961176952, \"MacroF1\": 0.8420581397672002, \"Memory in Mb\": 0.7242574691772461, \"Time in s\": 7.621751000000001 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8500875656742557, \"MicroF1\": 0.8500875656742557, \"MacroF1\": 0.834558203718852, \"Memory in Mb\": 0.8199424743652344, \"Time in s\": 10.917147 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8406374501992032, \"MicroF1\": 0.8406374501992032, \"MacroF1\": 0.8151418555553325, \"Memory in Mb\": 0.9155054092407228, \"Time in s\": 14.806837 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8321983110868973, \"MicroF1\": 0.8321983110868973, \"MacroF1\": 0.8307198315203921, \"Memory in Mb\": 1.011190414428711, \"Time in s\": 19.353183 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.826182887962736, \"MicroF1\": 0.826182887962736, \"MacroF1\": 0.812376785603362, \"Memory in Mb\": 1.1319761276245115, \"Time in s\": 24.603589 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.809226654780477, \"MicroF1\": 0.809226654780477, \"MacroF1\": 0.8196273526663149, \"Memory in Mb\": 1.2275390625, \"Time in s\": 30.588099 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081716036772216, \"MicroF1\": 0.8081716036772216, \"MacroF1\": 0.815232111826365, \"Memory in Mb\": 1.3230409622192385, \"Time in s\": 37.350443 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8057703186875353, \"MicroF1\": 0.8057703186875353, \"MacroF1\": 0.7903391475861199, \"Memory in Mb\": 1.4186649322509766, \"Time in s\": 44.935392 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7860269655051655, \"MicroF1\": 0.7860269655051656, \"MacroF1\": 0.7895763142947655, \"Memory in Mb\": 1.5144109725952148, \"Time in s\": 53.372574 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.784441902271613, \"MicroF1\": 0.784441902271613, \"MacroF1\": 0.7657785418705475, \"Memory in Mb\": 1.6098518371582031, \"Time in s\": 62.716148 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7585414432357898, \"MicroF1\": 0.7585414432357898, \"MacroF1\": 0.751418836389106, \"Memory in Mb\": 1.7056589126586914, \"Time in s\": 73.022548 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7473684210526316, \"MicroF1\": 0.7473684210526316, \"MacroF1\": 0.7484284412750403, \"Memory in Mb\": 1.8010997772216797, \"Time in s\": 84.351871 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.7565027917744791, \"MicroF1\": 0.7565027917744791, \"MacroF1\": 0.7526701844923946, \"Memory in Mb\": 1.898371696472168, \"Time in s\": 96.7592 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7577086827506129, \"MicroF1\": 0.7577086827506129, \"MacroF1\": 0.7557350658705178, \"Memory in Mb\": 1.9939956665039065, \"Time in s\": 110.303168 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7617355068023042, \"MicroF1\": 0.7617355068023042, \"MacroF1\": 0.7576049653668415, \"Memory in Mb\": 2.0895586013793945, \"Time in s\": 125.047569 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7604762460604646, \"MicroF1\": 0.7604762460604646, \"MacroF1\": 0.7596175662696861, \"Memory in Mb\": 2.2332935333251958, \"Time in s\": 141.03972299999998 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.756991643454039, \"MicroF1\": 0.7569916434540391, \"MacroF1\": 0.7575313939177277, \"Memory in Mb\": 2.328978538513184, \"Time in s\": 158.34463899999997 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7558350207822658, \"MicroF1\": 0.7558350207822658, \"MacroF1\": 0.7548436696787698, \"Memory in Mb\": 2.424480438232422, \"Time in s\": 177.02417899999998 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.748340312531917, \"MicroF1\": 0.7483403125319169, \"MacroF1\": 0.7443908596260193, \"Memory in Mb\": 2.52004337310791, \"Time in s\": 197.139779 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7393862143347387, \"MicroF1\": 0.7393862143347387, \"MacroF1\": 0.7315892779928432, \"Memory in Mb\": 2.6156673431396484, \"Time in s\": 218.763511 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7196191194494201, \"MicroF1\": 0.7196191194494201, \"MacroF1\": 0.7089541376321257, \"Memory in Mb\": 2.7114133834838867, \"Time in s\": 241.930258 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7123921924648207, \"MicroF1\": 0.7123921924648208, \"MacroF1\": 0.7092068316988943, \"Memory in Mb\": 2.806976318359375, \"Time in s\": 266.699543 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7062943184802591, \"MicroF1\": 0.7062943184802591, \"MacroF1\": 0.694671323095531, \"Memory in Mb\": 2.9026002883911133, \"Time in s\": 293.16281 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6967289324655566, \"MicroF1\": 0.6967289324655566, \"MacroF1\": 0.6902328307983061, \"Memory in Mb\": 2.9981632232666016, \"Time in s\": 321.350926 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.7007108423890841, \"MicroF1\": 0.7007108423890841, \"MacroF1\": 0.6983689907908355, \"Memory in Mb\": 3.09378719329834, \"Time in s\": 351.321335 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6969241717403337, \"MicroF1\": 0.6969241717403337, \"MacroF1\": 0.6892508246262707, \"Memory in Mb\": 3.189472198486328, \"Time in s\": 383.138799 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6836461126005362, \"MicroF1\": 0.6836461126005362, \"MacroF1\": 0.6755391962059191, \"Memory in Mb\": 3.2851572036743164, \"Time in s\": 416.860837 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6793433855752804, \"MicroF1\": 0.6793433855752804, \"MacroF1\": 0.6754035266161622, \"Memory in Mb\": 3.3807201385498047, \"Time in s\": 452.545925 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6769519140653161, \"MicroF1\": 0.6769519140653161, \"MacroF1\": 0.6742482232309566, \"Memory in Mb\": 3.476466178894043, \"Time in s\": 490.25119899999993 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6728762518383641, \"MicroF1\": 0.6728762518383641, \"MacroF1\": 0.6689356443053496, \"Memory in Mb\": 3.5720291137695312, \"Time in s\": 530.0355519999999 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6762442976782188, \"MicroF1\": 0.6762442976782188, \"MacroF1\": 0.6753292472514647, \"Memory in Mb\": 3.66759204864502, \"Time in s\": 571.9481539999999 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6830076184166942, \"MicroF1\": 0.6830076184166942, \"MacroF1\": 0.6822311287838643, \"Memory in Mb\": 3.763277053833008, \"Time in s\": 616.057788 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6818035218989873, \"MicroF1\": 0.6818035218989873, \"MacroF1\": 0.6788656596145115, \"Memory in Mb\": 3.858839988708496, \"Time in s\": 662.434182 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6816039218150964, \"MicroF1\": 0.6816039218150964, \"MacroF1\": 0.6801525397911032, \"Memory in Mb\": 0.2779512405395508, \"Time in s\": 710.461266 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6858263373981249, \"MicroF1\": 0.6858263373981249, \"MacroF1\": 0.6851912800185752, \"Memory in Mb\": 0.4695272445678711, \"Time in s\": 759.2533930000001 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6896634184253004, \"MicroF1\": 0.6896634184253004, \"MacroF1\": 0.6890226069872225, \"Memory in Mb\": 0.6609811782836914, \"Time in s\": 808.8937860000001 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", 
\"dataset\": \"Keystroke\", \"Accuracy\": 0.6925007295010213, \"MicroF1\": 0.6925007295010213, \"MacroF1\": 0.691863544221197, \"Memory in Mb\": 0.9803314208984376, \"Time in s\": 859.476205 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6990252522373597, \"MicroF1\": 0.6990252522373597, \"MacroF1\": 0.6986638608261282, \"Memory in Mb\": 0.2722988128662109, \"Time in s\": 910.506546 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7040833379756003, \"MicroF1\": 0.7040833379756003, \"MacroF1\": 0.7034973599095433, \"Memory in Mb\": 0.1428661346435547, \"Time in s\": 961.768777 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7102783376000872, \"MicroF1\": 0.7102783376000872, \"MacroF1\": 0.7096708693716106, \"Memory in Mb\": 0.2385511398315429, \"Time in s\": 1013.187005 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7155645548036447, \"MicroF1\": 0.7155645548036447, \"MacroF1\": 0.714820465744771, \"Memory in Mb\": 0.3341751098632812, \"Time in s\": 1064.826228 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7183833116036505, \"MicroF1\": 0.7183833116036505, \"MacroF1\": 0.7174783905571958, \"Memory in Mb\": 0.4296159744262695, \"Time in s\": 1116.742805 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7229229433692489, \"MicroF1\": 0.7229229433692489, \"MacroF1\": 0.7220221994049509, \"Memory in Mb\": 0.5253620147705078, \"Time in s\": 1168.993402 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7224751138012105, \"MicroF1\": 0.7224751138012104, \"MacroF1\": 0.7211832505275634, \"Memory in Mb\": 0.6323385238647461, \"Time in s\": 1221.636335 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7237119466640521, \"MicroF1\": 0.7237119466640521, \"MacroF1\": 0.7223930256436224, \"Memory in Mb\": 0.7279014587402344, \"Time in s\": 1274.7271140000005 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4222222222222222, \"MicroF1\": 0.4222222222222222, \"MacroF1\": 0.3590236094437775, \"Memory in Mb\": 0.9732446670532228, \"Time in s\": 0.50913 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5604395604395604, \"MicroF1\": 0.5604395604395604, \"MacroF1\": 0.5746538615446178, \"Memory in Mb\": 1.0627803802490234, \"Time in s\": 1.324173 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5766423357664233, \"MicroF1\": 0.5766423357664233, \"MacroF1\": 0.598257695340355, \"Memory in Mb\": 1.355058670043945, \"Time in s\": 2.399595 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.6229508196721312, \"MicroF1\": 0.6229508196721312, \"MacroF1\": 0.6451744040758779, \"Memory in Mb\": 1.424909591674805, \"Time in s\": 3.72413 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6506550218340611, \"MicroF1\": 0.6506550218340611, \"MacroF1\": 0.668065528002595, \"Memory in Mb\": 1.5721073150634766, \"Time in s\": 5.289042 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6727272727272727, \"MicroF1\": 0.6727272727272727, \"MacroF1\": 0.6900672130049011, \"Memory in Mb\": 1.7710065841674805, \"Time in s\": 7.016464 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7040498442367601, \"MicroF1\": 0.7040498442367601, \"MacroF1\": 0.7087861936875777, \"Memory in Mb\": 1.8489313125610352, \"Time in s\": 8.876771 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7302452316076294, \"MicroF1\": 0.7302452316076294, \"MacroF1\": 0.7285991575377422, \"Memory in Mb\": 1.987476348876953, \"Time in s\": 10.883345 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7457627118644068, \"MicroF1\": 0.7457627118644068, \"MacroF1\": 0.7430362907281778, \"Memory in Mb\": 2.008787155151367, \"Time in s\": 13.045158 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7342047930283224, \"MicroF1\": 0.7342047930283224, \"MacroF1\": 0.7271744800226859, \"Memory in Mb\": 1.8246965408325195, \"Time in s\": 15.362221000000002 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7405940594059406, \"MicroF1\": 0.7405940594059406, \"MacroF1\": 0.7304322149686578, \"Memory in Mb\": 1.7282800674438477, \"Time in s\": 17.823546 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7368421052631579, \"MicroF1\": 0.7368421052631579, \"MacroF1\": 0.7267508109083203, \"Memory in Mb\": 1.5214414596557615, \"Time in s\": 20.437237 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7403685092127303, \"MicroF1\": 0.7403685092127302, \"MacroF1\": 0.7318978254380312, \"Memory in Mb\": 1.6621322631835938, \"Time in s\": 23.204591 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7325038880248833, \"MicroF1\": 0.7325038880248833, \"MacroF1\": 0.7248107612258206, \"Memory in Mb\": 1.7895660400390625, \"Time in s\": 26.125323 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7242380261248186, \"MicroF1\": 0.7242380261248187, \"MacroF1\": 0.7153272190465999, \"Memory in Mb\": 1.929594039916992, \"Time in s\": 29.195315 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251700680272108, \"MicroF1\": 0.725170068027211, \"MacroF1\": 0.7148466398758337, \"Memory in Mb\": 2.079819679260254, \"Time in s\": 32.416287000000004 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7259923175416133, \"MicroF1\": 0.7259923175416134, \"MacroF1\": 0.7134712280209222, \"Memory in Mb\": 2.0407657623291016, \"Time in s\": 35.785816000000004 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.727932285368803, \"MicroF1\": 0.727932285368803, \"MacroF1\": 0.7177600265828429, \"Memory in Mb\": 2.245401382446289, \"Time in s\": 39.307391 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7353951890034365, \"MicroF1\": 0.7353951890034366, \"MacroF1\": 0.7262567978322628, \"Memory in Mb\": 2.3208675384521484, \"Time in s\": 42.982338000000006 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431991294885746, \"MicroF1\": 0.7431991294885745, \"MacroF1\": 0.7345004589126253, \"Memory in Mb\": 2.463038444519043, \"Time in s\": 46.81357700000001 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7471502590673575, \"MicroF1\": 0.7471502590673575, \"MacroF1\": 0.7368855656689401, \"Memory in Mb\": 2.4979677200317383, \"Time in s\": 50.81254500000001 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7546983184965381, \"MicroF1\": 0.754698318496538, \"MacroF1\": 0.7446216664767904, \"Memory in Mb\": 2.589772224426269, \"Time in s\": 54.96693400000001 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.760643330179754, \"MicroF1\": 0.760643330179754, \"MacroF1\": 0.7502594177262459, \"Memory in Mb\": 2.824686050415039, \"Time in s\": 59.28579400000001 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7624660018132366, \"MicroF1\": 0.7624660018132366, \"MacroF1\": 0.7523020427630668, \"Memory in Mb\": 2.512765884399414, \"Time in s\": 63.76907700000001 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650130548302873, \"MicroF1\": 0.7650130548302874, \"MacroF1\": 0.7555087521342715, \"Memory in Mb\": 2.350802421569824, \"Time in s\": 68.40298100000001 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7690376569037657, \"MicroF1\": 0.7690376569037657, \"MacroF1\": 0.7603504370239863, \"Memory in Mb\": 2.0774078369140625, \"Time in s\": 73.17908000000001 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7719580983078163, \"MicroF1\": 0.7719580983078163, \"MacroF1\": 0.7638249032322542, \"Memory in Mb\": 2.143113136291504, \"Time in s\": 78.09633200000002 }, { \"step\": 1288, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7746697746697747, \"MicroF1\": 0.7746697746697747, \"MacroF1\": 0.7668828628349821, \"Memory in Mb\": 2.3053293228149414, \"Time in s\": 83.14236000000002 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7771942985746436, \"MicroF1\": 0.7771942985746436, \"MacroF1\": 0.7696789046658701, \"Memory in Mb\": 2.4279375076293945, \"Time in s\": 88.31606900000003 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7817258883248731, \"MicroF1\": 0.7817258883248731, \"MacroF1\": 0.7754511149783998, \"Memory in Mb\": 2.350360870361328, \"Time in s\": 93.61768400000004 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7866666666666666, \"MicroF1\": 0.7866666666666666, \"MacroF1\": 0.7797171864703156, \"Memory in Mb\": 2.461531639099121, \"Time in s\": 99.04413800000005 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7912984364377974, \"MicroF1\": 0.7912984364377974, \"MacroF1\": 0.7836430453045393, \"Memory in Mb\": 2.5941333770751958, \"Time in s\": 104.59649900000004 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7963085036255768, \"MicroF1\": 0.7963085036255768, \"MacroF1\": 0.7883976288226552, \"Memory in Mb\": 2.7080554962158203, \"Time in s\": 110.30036700000004 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7984644913627639, \"MicroF1\": 0.7984644913627639, \"MacroF1\": 0.7915512335737709, \"Memory in Mb\": 2.379396438598633, \"Time in s\": 116.13112600000004 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.798011187072716, \"MicroF1\": 0.7980111870727161, \"MacroF1\": 0.7913527809122488, \"Memory in Mb\": 2.557906150817871, \"Time in s\": 122.09210800000004 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7987915407854985, \"MicroF1\": 0.7987915407854985, \"MacroF1\": 0.7921693301011166, \"Memory in Mb\": 2.5870275497436523, \"Time in s\": 128.19249900000003 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7995296884185773, \"MicroF1\": 0.7995296884185774, \"MacroF1\": 0.7947635312368726, \"Memory in Mb\": 2.441390991210937, \"Time in s\": 134.42240900000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8019461934745278, \"MicroF1\": 0.8019461934745278, \"MacroF1\": 0.7968342396743014, \"Memory in Mb\": 2.619420051574707, \"Time in s\": 140.777643 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8059118795315114, \"MicroF1\": 0.8059118795315114, \"MacroF1\": 
0.8002313091513137, \"Memory in Mb\": 2.70393180847168, \"Time in s\": 147.25850100000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8058727569331158, \"MicroF1\": 0.8058727569331158, \"MacroF1\": 0.8006185305294855, \"Memory in Mb\": 3.167543411254883, \"Time in s\": 153.88034500000003 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8084880636604774, \"MicroF1\": 0.8084880636604774, \"MacroF1\": 0.8041348438460234, \"Memory in Mb\": 3.187774658203125, \"Time in s\": 160.64099900000002 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8089073019161056, \"MicroF1\": 0.8089073019161055, \"MacroF1\": 0.8042053366874767, \"Memory in Mb\": 3.4328765869140625, \"Time in s\": 167.537774 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8108244815376834, \"MicroF1\": 0.8108244815376834, \"MacroF1\": 0.8062422218151643, \"Memory in Mb\": 3.621993064880371, \"Time in s\": 174.56717600000002 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8111715274345032, \"MicroF1\": 0.8111715274345032, \"MacroF1\": 0.805670935248126, \"Memory in Mb\": 3.783546447753906, \"Time in s\": 181.730034 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8134364427259546, \"MicroF1\": 0.8134364427259546, \"MacroF1\": 0.8085538776813638, \"Memory in Mb\": 3.740958213806152, \"Time in s\": 189.029553 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.816548463356974, \"MicroF1\": 0.816548463356974, \"MacroF1\": 0.8113031614777911, \"Memory in Mb\": 3.760796546936035, \"Time in s\": 196.461518 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8167515039333642, \"MicroF1\": 0.8167515039333642, \"MacroF1\": 0.8113905234748385, \"Memory in Mb\": 4.035200119018555, \"Time in s\": 204.024576 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.818305391934753, \"MicroF1\": 0.818305391934753, \"MacroF1\": 0.8126353495892602, \"Memory in Mb\": 4.192110061645508, \"Time in s\": 211.717055 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8184642698624057, \"MicroF1\": 0.8184642698624057, \"MacroF1\": 0.8136291554244021, \"Memory in Mb\": 4.486760139465332, \"Time in s\": 219.553296 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8190517616354936, \"MicroF1\": 0.8190517616354936, \"MacroF1\": 0.8144252010220491, \"Memory in Mb\": 4.66081428527832, \"Time in s\": 227.540685 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6701421800947868, \"MicroF1\": 
0.6701421800947868, \"MacroF1\": 0.6068786932307204, \"Memory in Mb\": 6.831533432006836, \"Time in s\": 4.539822 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6887730933207011, \"MicroF1\": 0.6887730933207011, \"MacroF1\": 0.6229217946585527, \"Memory in Mb\": 10.195775032043455, \"Time in s\": 12.638872 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.6962425007893905, \"MicroF1\": 0.6962425007893905, \"MacroF1\": 0.622910390568452, \"Memory in Mb\": 16.60274887084961, \"Time in s\": 24.371093 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7054226852948141, \"MicroF1\": 0.7054226852948141, \"MacroF1\": 0.6279874627708885, \"Memory in Mb\": 17.471903800964355, \"Time in s\": 39.890989 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7099829513165372, \"MicroF1\": 0.7099829513165372, \"MacroF1\": 0.6301031937879839, \"Memory in Mb\": 20.888835906982425, \"Time in s\": 58.938014 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7108129439621153, \"MicroF1\": 0.7108129439621153, \"MacroF1\": 0.6300557461749893, \"Memory in Mb\": 23.72772216796875, \"Time in s\": 81.51329799999999 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7126234609660398, \"MicroF1\": 0.7126234609660397, \"MacroF1\": 0.6287819651813062, \"Memory in Mb\": 28.070199966430664, \"Time in s\": 107.680368 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7168225405469397, \"MicroF1\": 0.7168225405469397, \"MacroF1\": 0.6299159911335922, \"Memory in Mb\": 31.613859176635746, \"Time in s\": 137.61638299999998 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7214563821950963, \"MicroF1\": 0.7214563821950963, \"MacroF1\": 0.6314635817104112, \"Memory in Mb\": 35.43206214904785, \"Time in s\": 171.294053 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7230798371057865, \"MicroF1\": 0.7230798371057865, \"MacroF1\": 0.6311670445333952, \"Memory in Mb\": 35.676584243774414, \"Time in s\": 208.900765 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7247524752475247, \"MicroF1\": 0.7247524752475247, \"MacroF1\": 0.6314302971551563, \"Memory in Mb\": 41.6592378616333, \"Time in s\": 250.157475 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7252781943019493, \"MicroF1\": 0.7252781943019494, \"MacroF1\": 0.6359647238599803, \"Memory in Mb\": 42.66780757904053, \"Time in s\": 295.204683 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7402928535004006, \"MicroF1\": 0.7402928535004006, \"MacroF1\": 
0.7348419335996624, \"Memory in Mb\": 24.025733947753903, \"Time in s\": 342.696854 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7468714063451262, \"MicroF1\": 0.7468714063451262, \"MacroF1\": 0.7455387452701401, \"Memory in Mb\": 2.272738456726074, \"Time in s\": 392.697558 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7388092682618852, \"MicroF1\": 0.7388092682618853, \"MacroF1\": 0.7393651674564367, \"Memory in Mb\": 6.547223091125488, \"Time in s\": 445.901262 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7343000887836638, \"MicroF1\": 0.7343000887836638, \"MacroF1\": 0.7364396291092657, \"Memory in Mb\": 11.097336769104004, \"Time in s\": 502.213806 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7271461199933151, \"MicroF1\": 0.7271461199933151, \"MacroF1\": 0.7303078098029304, \"Memory in Mb\": 16.88492202758789, \"Time in s\": 561.702561 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373073078339559, \"MicroF1\": 0.7373073078339558, \"MacroF1\": 0.7369507693389319, \"Memory in Mb\": 6.800461769104004, \"Time in s\": 623.6949609999999 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7412650152021133, \"MicroF1\": 0.7412650152021133, \"MacroF1\": 0.7370000710650216, \"Memory in Mb\": 3.369675636291504, \"Time in s\": 688.4094249999999 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7425540982054074, \"MicroF1\": 0.7425540982054074, \"MacroF1\": 0.7353012584659039, \"Memory in Mb\": 5.941231727600098, \"Time in s\": 756.7149029999999 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7435851183765502, \"MicroF1\": 0.7435851183765501, \"MacroF1\": 0.7334480812988377, \"Memory in Mb\": 8.846389770507812, \"Time in s\": 828.508796 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7452111402866859, \"MicroF1\": 0.7452111402866859, \"MacroF1\": 0.7324964055744654, \"Memory in Mb\": 9.471121788024902, \"Time in s\": 903.877525 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7463663688392967, \"MicroF1\": 0.7463663688392966, \"MacroF1\": 0.7310050929424414, \"Memory in Mb\": 12.406947135925291, \"Time in s\": 982.696529 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7474647831748412, \"MicroF1\": 0.7474647831748412, \"MacroF1\": 0.7298615493429103, \"Memory in Mb\": 15.979948043823242, \"Time in s\": 1064.886272 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7483616803666806, \"MicroF1\": 0.7483616803666806, \"MacroF1\": 0.7285096183890708, 
\"Memory in Mb\": 19.665884017944336, \"Time in s\": 1150.466447 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.749626661810235, \"MicroF1\": 0.749626661810235, \"MacroF1\": 0.7275235594970662, \"Memory in Mb\": 24.26569175720215, \"Time in s\": 1239.482455 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7465188874469503, \"MicroF1\": 0.7465188874469504, \"MacroF1\": 0.7258897093263847, \"Memory in Mb\": 8.914395332336426, \"Time in s\": 1332.2242660000002 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7451550715324518, \"MicroF1\": 0.7451550715324519, \"MacroF1\": 0.7292330017207805, \"Memory in Mb\": 8.459691047668457, \"Time in s\": 1428.2455670000002 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7443751428664729, \"MicroF1\": 0.7443751428664729, \"MacroF1\": 0.7327893612754602, \"Memory in Mb\": 12.943696022033691, \"Time in s\": 1527.20672 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7437103443921841, \"MicroF1\": 0.7437103443921841, \"MacroF1\": 0.7357305076230832, \"Memory in Mb\": 18.80640697479248, \"Time in s\": 1629.363326 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7432717275087827, \"MicroF1\": 0.7432717275087827, \"MacroF1\": 0.7381285892142362, \"Memory in Mb\": 16.162379264831543, \"Time in s\": 1734.350472 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7377408185611554, \"MicroF1\": 0.7377408185611554, \"MacroF1\": 0.7340057348640155, \"Memory in Mb\": 11.18346881866455, \"Time in s\": 1842.731877 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7340373633311332, \"MicroF1\": 0.7340373633311332, \"MacroF1\": 0.7302084976112027, \"Memory in Mb\": 5.613262176513672, \"Time in s\": 1955.023471 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7312759379439044, \"MicroF1\": 0.7312759379439044, \"MacroF1\": 0.7271196230245338, \"Memory in Mb\": 9.756120681762695, \"Time in s\": 2071.1719540000004 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7278335452799047, \"MicroF1\": 0.7278335452799047, \"MacroF1\": 0.7234434079919367, \"Memory in Mb\": 11.990450859069824, \"Time in s\": 2191.1586210000005 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7254241746678942, \"MicroF1\": 0.7254241746678942, \"MacroF1\": 0.7207605796154644, \"Memory in Mb\": 14.404197692871094, \"Time in s\": 2314.6725880000004 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7250390315067441, \"MicroF1\": 0.7250390315067441, \"MacroF1\": 0.7205508934526729, 
\"Memory in Mb\": 7.651473045349121, \"Time in s\": 2441.666332 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7236524036185112, \"MicroF1\": 0.7236524036185111, \"MacroF1\": 0.7196200887167502, \"Memory in Mb\": 7.583705902099609, \"Time in s\": 2572.286548 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7235995435009591, \"MicroF1\": 0.7235995435009591, \"MacroF1\": 0.7199895911465058, \"Memory in Mb\": 12.209360122680664, \"Time in s\": 2706.295895 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7235966760576719, \"MicroF1\": 0.7235966760576719, \"MacroF1\": 0.7203672841246517, \"Memory in Mb\": 15.002169609069824, \"Time in s\": 2843.875227 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7241713823767179, \"MicroF1\": 0.7241713823767179, \"MacroF1\": 0.7213145862540888, \"Memory in Mb\": 17.433518409729004, \"Time in s\": 2985.305163 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7245608892696895, \"MicroF1\": 0.7245608892696895, \"MacroF1\": 0.7219384327675483, \"Memory in Mb\": 20.337363243103027, \"Time in s\": 3130.446815 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7253947629220164, \"MicroF1\": 0.7253947629220163, \"MacroF1\": 0.7227741676779873, \"Memory in Mb\": 20.507991790771484, \"Time in s\": 3279.062075 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7263198674213891, \"MicroF1\": 0.7263198674213891, \"MacroF1\": 0.7236028172229397, \"Memory in Mb\": 24.947001457214355, \"Time in s\": 3431.085655 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7259622466802753, \"MicroF1\": 0.7259622466802753, \"MacroF1\": 0.7234132526915972, \"Memory in Mb\": 9.389252662658691, \"Time in s\": 3586.979459 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7297581060216161, \"MicroF1\": 0.7297581060216161, \"MacroF1\": 0.7273884829439242, \"Memory in Mb\": 9.12541389465332, \"Time in s\": 3745.95156 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7336543692450284, \"MicroF1\": 0.7336543692450284, \"MacroF1\": 0.7312645046388119, \"Memory in Mb\": 8.804935455322266, \"Time in s\": 3907.504223 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7372501824925524, \"MicroF1\": 0.7372501824925524, \"MacroF1\": 0.7346466630802606, \"Memory in Mb\": 12.506796836853027, \"Time in s\": 4071.296369 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.741182382157973, \"MicroF1\": 0.741182382157973, \"MacroF1\": 0.7382896911640772, \"Memory in Mb\": 
15.31224250793457, \"Time in s\": 4237.296913 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Insects\", \"Accuracy\": 0.7442565200098487, \"MicroF1\": 0.7442565200098487, \"MacroF1\": 0.7419321396565435, \"Memory in Mb\": 0.3696470260620117, \"Time in s\": 4404.707415 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 0.3514842987060547, \"Time in s\": 0.595241 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9730061349693252, \"MicroF1\": 0.9730061349693252, \"MacroF1\": 0.7867307803099512, \"Memory in Mb\": 1.312638282775879, \"Time in s\": 2.098583 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9705641864268192, \"MicroF1\": 0.9705641864268192, \"MacroF1\": 0.93705029195588, \"Memory in Mb\": 2.2586374282836914, \"Time in s\": 4.31371 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9711833231146536, \"MicroF1\": 0.9711833231146536, \"MacroF1\": 0.9377953913100076, \"Memory in Mb\": 3.394951820373535, \"Time in s\": 7.338651 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.969592937714566, \"MicroF1\": 0.969592937714566, \"MacroF1\": 0.9445939973353388, \"Memory in Mb\": 5.254854202270508, \"Time in s\": 11.230817 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.970167552104618, \"MicroF1\": 0.970167552104618, \"MacroF1\": 0.9654865811906564, \"Memory in Mb\": 2.048126220703125, \"Time in s\": 15.860121 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726795096322242, \"MicroF1\": 0.9726795096322242, \"MacroF1\": 0.9705770446236132, \"Memory in Mb\": 2.732625961303711, \"Time in s\": 21.099127 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.971805087342936, \"MicroF1\": 0.971805087342936, \"MacroF1\": 0.9627836140542232, \"Memory in Mb\": 2.790935516357422, \"Time in s\": 26.971176 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733042767638246, \"MicroF1\": 0.9733042767638246, \"MacroF1\": 0.9719148371902758, \"Memory in Mb\": 2.987569808959961, \"Time in s\": 33.477629 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9698455503799952, \"MicroF1\": 0.9698455503799952, \"MacroF1\": 0.958802050565698, \"Memory in Mb\": 4.571287155151367, \"Time in s\": 40.764845 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9710274125250724, \"MicroF1\": 0.9710274125250724, \"MacroF1\": 0.970190142555116, \"Memory in Mb\": 1.7459039688110352, \"Time in s\": 48.697823 }, { \"step\": 
4896, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722165474974463, \"MicroF1\": 0.9722165474974463, \"MacroF1\": 0.971936417428158, \"Memory in Mb\": 2.7892093658447266, \"Time in s\": 57.250967 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9720912690929664, \"MicroF1\": 0.9720912690929664, \"MacroF1\": 0.970282662152698, \"Memory in Mb\": 2.895453453063965, \"Time in s\": 66.54505900000001 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9723340921029592, \"MicroF1\": 0.9723340921029592, \"MacroF1\": 0.9718828908328702, \"Memory in Mb\": 4.064221382141113, \"Time in s\": 76.51574600000001 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718908318352673, \"MicroF1\": 0.9718908318352673, \"MacroF1\": 0.9703726237787478, \"Memory in Mb\": 5.130434989929199, \"Time in s\": 87.20768000000001 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97196261682243, \"MicroF1\": 0.97196261682243, \"MacroF1\": 0.9714458378209956, \"Memory in Mb\": 2.455193519592285, \"Time in s\": 98.596053 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733237202595528, \"MicroF1\": 0.9733237202595528, \"MacroF1\": 0.9740372626056704, \"Memory in Mb\": 2.4587574005126958, \"Time in s\": 110.618692 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9735802805392892, \"MicroF1\": 0.9735802805392892, \"MacroF1\": 0.973376514333954, \"Memory in Mb\": 3.893580436706543, \"Time in s\": 123.504669 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9729067217133271, \"MicroF1\": 0.9729067217133271, \"MacroF1\": 0.972110994169212, \"Memory in Mb\": 4.61766529083252, \"Time in s\": 137.14807100000002 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97217796298566, \"MicroF1\": 0.97217796298566, \"MacroF1\": 0.9713389113158796, \"Memory in Mb\": 5.2350358963012695, \"Time in s\": 151.61678500000002 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725691607330454, \"MicroF1\": 0.9725691607330454, \"MacroF1\": 0.9726516232305996, \"Memory in Mb\": 3.659168243408203, \"Time in s\": 166.92608600000003 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733704735376044, \"MicroF1\": 0.9733704735376044, \"MacroF1\": 0.973745927183376, \"Memory in Mb\": 5.072476387023926, \"Time in s\": 182.940013 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9733560694873707, \"MicroF1\": 0.9733560694873707, \"MacroF1\": 0.9732604538569352, \"Memory in Mb\": 5.722126007080078, \"Time in s\": 199.767059 }, { \"step\": 9792, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9731385966704116, \"MicroF1\": 0.9731385966704116, \"MacroF1\": 0.9729642609350584, \"Memory in Mb\": 4.404660224914551, \"Time in s\": 217.43706000000003 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725463280713796, \"MicroF1\": 0.9725463280713796, \"MacroF1\": 0.9722080895483168, \"Memory in Mb\": 2.652709007263184, \"Time in s\": 235.89310200000003 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726595644385784, \"MicroF1\": 0.9726595644385784, \"MacroF1\": 0.972708084817296, \"Memory in Mb\": 1.1834087371826172, \"Time in s\": 254.958035 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9729459827507944, \"MicroF1\": 0.9729459827507944, \"MacroF1\": 0.973083018444042, \"Memory in Mb\": 1.520833969116211, \"Time in s\": 274.61158 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9724240567276548, \"MicroF1\": 0.9724240567276548, \"MacroF1\": 0.972236101273467, \"Memory in Mb\": 2.823396682739258, \"Time in s\": 294.969552 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9720226523539852, \"MicroF1\": 0.9720226523539852, \"MacroF1\": 0.9719096687987197, \"Memory in Mb\": 2.493410110473633, \"Time in s\": 316.08929700000004 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726284827191763, \"MicroF1\": 0.9726284827191763, \"MacroF1\": 0.9728780734732722, \"Memory in Mb\": 2.3478269577026367, \"Time in s\": 337.99406600000003 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9727998734877836, \"MicroF1\": 0.9727998734877836, \"MacroF1\": 0.9729097588140672, \"Memory in Mb\": 2.5516576766967773, \"Time in s\": 360.586004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9726541554959786, \"MicroF1\": 0.9726541554959786, \"MacroF1\": 0.9726709194030316, \"Memory in Mb\": 3.304943084716797, \"Time in s\": 383.87924 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722201589541708, \"MicroF1\": 0.9722201589541708, \"MacroF1\": 0.9721650267620996, \"Memory in Mb\": 4.003572463989258, \"Time in s\": 407.932153 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97246052916156, \"MicroF1\": 0.97246052916156, \"MacroF1\": 0.972591005704606, \"Memory in Mb\": 4.270735740661621, \"Time in s\": 432.78626 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9713565375726592, \"MicroF1\": 0.9713565375726592, \"MacroF1\": 0.9711654862365112, \"Memory in Mb\": 4.102839469909668, \"Time in s\": 458.452707 }, { \"step\": 14688, 
\"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718118063593654, \"MicroF1\": 0.9718118063593654, \"MacroF1\": 0.9719655808676524, \"Memory in Mb\": 3.971695899963379, \"Time in s\": 484.888355 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9724412056972508, \"MicroF1\": 0.9724412056972508, \"MacroF1\": 0.9726138064055022, \"Memory in Mb\": 4.612870216369629, \"Time in s\": 512.116119 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.972327936528414, \"MicroF1\": 0.972327936528414, \"MacroF1\": 0.9723669009986284, \"Memory in Mb\": 3.2941598892211914, \"Time in s\": 540.133825 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97240902520269, \"MicroF1\": 0.97240902520269, \"MacroF1\": 0.9724748506273226, \"Memory in Mb\": 5.215278625488281, \"Time in s\": 569.064511 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9718119982842086, \"MicroF1\": 0.9718119982842086, \"MacroF1\": 0.9717822259045504, \"Memory in Mb\": 2.705050468444824, \"Time in s\": 598.831932 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9713636635379924, \"MicroF1\": 0.9713636635379924, \"MacroF1\": 0.971358198091739, \"Memory in Mb\": 1.4999914169311523, \"Time in s\": 629.214947 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9716953603735046, \"MicroF1\": 0.9716953603735046, \"MacroF1\": 0.9717778191727772, \"Memory in Mb\": 1.5952835083007812, \"Time in s\": 660.25813 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9716696118109788, \"MicroF1\": 0.9716696118109788, \"MacroF1\": 0.971712982907841, \"Memory in Mb\": 2.6761178970336914, \"Time in s\": 692.002463 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9709765472675616, \"MicroF1\": 0.9709765472675616, \"MacroF1\": 0.970966525204854, \"Memory in Mb\": 3.671113014221192, \"Time in s\": 724.550435 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9709679176425732, \"MicroF1\": 0.9709679176425732, \"MacroF1\": 0.9710033330464194, \"Memory in Mb\": 4.91295337677002, \"Time in s\": 757.867405 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.971012948260244, \"MicroF1\": 0.971012948260244, \"MacroF1\": 0.9710485326344032, \"Memory in Mb\": 5.05178165435791, \"Time in s\": 792.058633 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97116036505867, \"MicroF1\": 0.97116036505867, \"MacroF1\": 0.9711938240802872, \"Memory in Mb\": 5.466279983520508, \"Time in s\": 827.148813 }, { \"step\": 19584, \"track\": \"Multiclass 
classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9707909921871012, \"MicroF1\": 0.9707909921871012, \"MacroF1\": 0.9708057459916865, \"Memory in Mb\": 5.881702423095703, \"Time in s\": 863.142165 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9705867640438196, \"MicroF1\": 0.9705867640438196, \"MacroF1\": 0.9706070593086332, \"Memory in Mb\": 5.831451416015625, \"Time in s\": 900.075184 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9698514633070248, \"MicroF1\": 0.9698514633070248, \"MacroF1\": 0.9698673821244655, \"Memory in Mb\": 2.3371658325195312, \"Time in s\": 937.846308 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2220238095238095, \"Memory in Mb\": 2.606511116027832, \"Time in s\": 1.431937 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4945054945054945, \"MicroF1\": 0.4945054945054945, \"MacroF1\": 0.5053729602697932, \"Memory in Mb\": 2.609585762023926, \"Time in s\": 3.886705 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5109489051094891, \"MicroF1\": 0.5109489051094891, \"MacroF1\": 0.5310665055578762, \"Memory in Mb\": 2.6113672256469727, \"Time in s\": 7.103774 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5737704918032787, \"MicroF1\": 0.5737704918032787, \"MacroF1\": 0.5886643910747036, \"Memory in Mb\": 2.6136903762817383, \"Time in s\": 11.095617 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6026200873362445, \"MicroF1\": 0.6026200873362445, \"MacroF1\": 0.6106719627755607, \"Memory in Mb\": 2.614529609680176, \"Time in s\": 15.833752 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6181818181818182, \"MicroF1\": 0.6181818181818182, \"MacroF1\": 0.6264208209498925, \"Memory in Mb\": 2.6147661209106445, \"Time in s\": 21.302563 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6448598130841121, \"MicroF1\": 0.6448598130841121, \"MacroF1\": 0.6378728366046057, \"Memory in Mb\": 2.616147041320801, \"Time in s\": 27.471138 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.667574931880109, \"MicroF1\": 0.667574931880109, \"MacroF1\": 0.6581306320431076, \"Memory in Mb\": 2.6166696548461914, \"Time in s\": 34.32642 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6803874092009685, \"MicroF1\": 0.6803874092009685, \"MacroF1\": 0.6704325632692101, \"Memory in Mb\": 2.6175050735473637, \"Time in s\": 41.86551 }, { \"step\": 460, \"track\": \"Multiclass 
classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6884531590413944, \"MicroF1\": 0.6884531590413944, \"MacroF1\": 0.6760149332924277, \"Memory in Mb\": 2.617680549621582, \"Time in s\": 50.055222 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.691089108910891, \"MicroF1\": 0.691089108910891, \"MacroF1\": 0.6769247074861785, \"Memory in Mb\": 2.617680549621582, \"Time in s\": 58.91147 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.691470054446461, \"MicroF1\": 0.691470054446461, \"MacroF1\": 0.6803521213965826, \"Memory in Mb\": 2.6178178787231445, \"Time in s\": 68.422832 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6968174204355109, \"MicroF1\": 0.6968174204355109, \"MacroF1\": 0.6854975219125513, \"Memory in Mb\": 2.617863655090332, \"Time in s\": 78.595742 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6936236391912908, \"MicroF1\": 0.6936236391912908, \"MacroF1\": 0.6835764097697864, \"Memory in Mb\": 2.6192026138305664, \"Time in s\": 89.423453 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6966618287373004, \"MicroF1\": 0.6966618287373004, \"MacroF1\": 0.6871604229696352, \"Memory in Mb\": 2.6194162368774414, \"Time in s\": 100.907013 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6965986394557823, \"MicroF1\": 0.6965986394557823, \"MacroF1\": 0.6884795420777536, \"Memory in Mb\": 2.6194887161254883, \"Time in s\": 113.056901 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7016645326504481, \"MicroF1\": 0.7016645326504481, \"MacroF1\": 0.6927955715819348, \"Memory in Mb\": 2.6197519302368164, \"Time in s\": 125.871728 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7037484885126964, \"MicroF1\": 0.7037484885126964, \"MacroF1\": 0.6971811816445675, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 139.34805 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7124856815578465, \"MicroF1\": 0.7124856815578465, \"MacroF1\": 0.7027179013602759, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 153.488556 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7127312295973884, \"MicroF1\": 0.7127312295973884, \"MacroF1\": 0.7019247882761857, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 168.286881 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7119170984455958, \"MicroF1\": 0.7119170984455958, \"MacroF1\": 0.7013991197312313, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 183.731934 }, { \"step\": 1012, \"track\": 
\"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7111770524233432, \"MicroF1\": 0.7111770524233432, \"MacroF1\": 0.7000689942734505, \"Memory in Mb\": 2.61989688873291, \"Time in s\": 199.829198 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7123935666982024, \"MicroF1\": 0.7123935666982024, \"MacroF1\": 0.700757485135609, \"Memory in Mb\": 2.620041847229004, \"Time in s\": 216.594079 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7116953762466002, \"MicroF1\": 0.7116953762466002, \"MacroF1\": 0.6997536275311635, \"Memory in Mb\": 2.6201601028442383, \"Time in s\": 234.008444 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7136640557006092, \"MicroF1\": 0.7136640557006092, \"MacroF1\": 0.7002507718266925, \"Memory in Mb\": 2.6201601028442383, \"Time in s\": 252.079326 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7154811715481172, \"MicroF1\": 0.7154811715481171, \"MacroF1\": 0.7029614354817431, \"Memory in Mb\": 2.530026435852051, \"Time in s\": 270.790044 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.717163577759871, \"MicroF1\": 0.717163577759871, \"MacroF1\": 0.7059650228666394, \"Memory in Mb\": 2.753697395324707, \"Time in s\": 290.035925 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7187257187257188, \"MicroF1\": 0.7187257187257188, \"MacroF1\": 0.706699668165461, \"Memory in Mb\": 3.664814949035645, \"Time in s\": 309.61323300000004 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.719429857464366, \"MicroF1\": 0.719429857464366, \"MacroF1\": 0.7094425115390415, \"Memory in Mb\": 4.463783264160156, \"Time in s\": 329.52005700000007 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251631617113851, \"MicroF1\": 0.725163161711385, \"MacroF1\": 0.7174387625572534, \"Memory in Mb\": 4.938790321350098, \"Time in s\": 349.76822000000004 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7319298245614035, \"MicroF1\": 0.7319298245614035, \"MacroF1\": 0.7244482628352659, \"Memory in Mb\": 5.045901298522949, \"Time in s\": 370.340987 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7335146159075459, \"MicroF1\": 0.7335146159075459, \"MacroF1\": 0.7247675805597543, \"Memory in Mb\": 5.884430885314941, \"Time in s\": 391.25731 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7251153592617007, \"MicroF1\": 0.7251153592617007, \"MacroF1\": 0.7184902268106362, \"Memory in Mb\": 6.2875261306762695, \"Time 
in s\": 412.53433 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7204094689699296, \"MicroF1\": 0.7204094689699295, \"MacroF1\": 0.7171509654034274, \"Memory in Mb\": 6.316588401794434, \"Time in s\": 434.180383 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7165941578620261, \"MicroF1\": 0.7165941578620262, \"MacroF1\": 0.7136076251491865, \"Memory in Mb\": 6.364602088928223, \"Time in s\": 456.188448 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7202416918429003, \"MicroF1\": 0.7202416918429003, \"MacroF1\": 0.7179265770125135, \"Memory in Mb\": 6.460197448730469, \"Time in s\": 478.554693 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.721928277483833, \"MicroF1\": 0.7219282774838331, \"MacroF1\": 0.7220156076184944, \"Memory in Mb\": 6.666633605957031, \"Time in s\": 501.28713 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7263880938752146, \"MicroF1\": 0.7263880938752146, \"MacroF1\": 0.7263874723147012, \"Memory in Mb\": 6.882956504821777, \"Time in s\": 524.384863 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7328499721137758, \"MicroF1\": 0.7328499721137758, \"MacroF1\": 0.7320714565315939, \"Memory in Mb\": 6.874361991882324, \"Time in s\": 547.829739 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.734094616639478, \"MicroF1\": 0.734094616639478, \"MacroF1\": 0.7334477172925166, \"Memory in Mb\": 7.857270240783691, \"Time in s\": 571.634536 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7358090185676393, \"MicroF1\": 0.7358090185676393, \"MacroF1\": 0.736235296466255, \"Memory in Mb\": 8.041683197021484, \"Time in s\": 595.832215 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7369238736406007, \"MicroF1\": 0.7369238736406007, \"MacroF1\": 0.7364098924240724, \"Memory in Mb\": 8.212060928344727, \"Time in s\": 620.406916 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7369752149721801, \"MicroF1\": 0.73697521497218, \"MacroF1\": 0.7356260672719533, \"Memory in Mb\": 8.416284561157227, \"Time in s\": 645.365163 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7409787444389521, \"MicroF1\": 0.7409787444389521, \"MacroF1\": 0.7385453010661254, \"Memory in Mb\": 8.869349479675293, \"Time in s\": 670.737914 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7438376027066216, \"MicroF1\": 0.7438376027066217, \"MacroF1\": 0.7418803204845174, \"Memory in Mb\": 
9.001053810119629, \"Time in s\": 696.540108 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7475177304964539, \"MicroF1\": 0.7475177304964539, \"MacroF1\": 0.7450940881618369, \"Memory in Mb\": 9.427652359008787, \"Time in s\": 722.759269 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7482646922720962, \"MicroF1\": 0.7482646922720962, \"MacroF1\": 0.7457425826498583, \"Memory in Mb\": 9.724228858947754, \"Time in s\": 749.427824 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7521522428636158, \"MicroF1\": 0.7521522428636158, \"MacroF1\": 0.7492034954191574, \"Memory in Mb\": 9.71615219116211, \"Time in s\": 776.532359 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7532179316466933, \"MicroF1\": 0.7532179316466933, \"MacroF1\": 0.7508205496072249, \"Memory in Mb\": 10.198495864868164, \"Time in s\": 804.090452 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7546759460635059, \"MicroF1\": 0.754675946063506, \"MacroF1\": 0.7527273841922961, \"Memory in Mb\": 10.425667762756348, \"Time in s\": 832.069921 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6265402843601896, \"MicroF1\": 0.6265402843601896, \"MacroF1\": 0.5882776540607534, \"Memory in Mb\": 10.90817928314209, \"Time in s\": 22.534162 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6570345807674088, \"MicroF1\": 0.6570345807674088, \"MacroF1\": 0.61544126739188, \"Memory in Mb\": 21.709880828857425, \"Time in s\": 65.027277 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6684559520050521, \"MicroF1\": 0.6684559520050521, \"MacroF1\": 0.6242294974630811, \"Memory in Mb\": 28.635205268859863, \"Time in s\": 131.537407 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6810324413923751, \"MicroF1\": 0.6810324413923751, \"MacroF1\": 0.6325456686453049, \"Memory in Mb\": 36.43542194366455, \"Time in s\": 221.773783 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6910399696912294, \"MicroF1\": 0.6910399696912294, \"MacroF1\": 0.6411255615252124, \"Memory in Mb\": 45.614484786987305, \"Time in s\": 335.089181 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6937647987371744, \"MicroF1\": 0.6937647987371744, \"MacroF1\": 0.6440375279924044, \"Memory in Mb\": 53.59738254547119, \"Time in s\": 471.883196 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.6988228927073468, \"MicroF1\": 0.6988228927073468, \"MacroF1\": 0.6494865599203364, \"Memory in Mb\": 
66.1818675994873, \"Time in s\": 633.159586 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7001302237480762, \"MicroF1\": 0.7001302237480762, \"MacroF1\": 0.6494906800979877, \"Memory in Mb\": 76.50763607025146, \"Time in s\": 819.592493 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7055666631590024, \"MicroF1\": 0.7055666631590024, \"MacroF1\": 0.6515748182594757, \"Memory in Mb\": 80.03414821624756, \"Time in s\": 1031.228873 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7099157117151246, \"MicroF1\": 0.7099157117151246, \"MacroF1\": 0.6536141909419667, \"Memory in Mb\": 75.23120212554932, \"Time in s\": 1268.49353 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7112354713732243, \"MicroF1\": 0.7112354713732243, \"MacroF1\": 0.6532930257397846, \"Memory in Mb\": 87.85937118530273, \"Time in s\": 1530.837041 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7140715018546286, \"MicroF1\": 0.7140715018546285, \"MacroF1\": 0.6586632134486646, \"Memory in Mb\": 96.90367698669434, \"Time in s\": 1818.32799 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7196765498652291, \"MicroF1\": 0.7196765498652291, \"MacroF1\": 0.7110222921473365, \"Memory in Mb\": 56.69392013549805, \"Time in s\": 2124.564995 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7275248596360685, \"MicroF1\": 0.7275248596360685, \"MacroF1\": 0.7243727970733626, \"Memory in Mb\": 23.290308952331543, \"Time in s\": 2446.995357 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7219521434433992, \"MicroF1\": 0.7219521434433992, \"MacroF1\": 0.7204121258981635, \"Memory in Mb\": 12.419946670532228, \"Time in s\": 2789.3598920000004 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7180822728617934, \"MicroF1\": 0.7180822728617934, \"MacroF1\": 0.7177336146344276, \"Memory in Mb\": 18.459078788757324, \"Time in s\": 3150.9630650000004 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7130521976491561, \"MicroF1\": 0.713052197649156, \"MacroF1\": 0.7136298242976093, \"Memory in Mb\": 32.946556091308594, \"Time in s\": 3531.3081450000004 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7221023833324565, \"MicroF1\": 0.7221023833324565, \"MacroF1\": 0.7193994629254835, \"Memory in Mb\": 14.42181396484375, \"Time in s\": 3928.2542010000006 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7273089767233215, \"MicroF1\": 0.7273089767233214, \"MacroF1\": 0.721146893328104, \"Memory 
in Mb\": 20.33617401123047, \"Time in s\": 4340.108142000001 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7289170888773142, \"MicroF1\": 0.7289170888773142, \"MacroF1\": 0.7201390592471967, \"Memory in Mb\": 29.71843242645264, \"Time in s\": 4775.145968000001 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7305524239007892, \"MicroF1\": 0.7305524239007891, \"MacroF1\": 0.719265816341323, \"Memory in Mb\": 21.72282314300537, \"Time in s\": 5233.162328 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7328569583745856, \"MicroF1\": 0.7328569583745856, \"MacroF1\": 0.7192472788421966, \"Memory in Mb\": 31.907146453857425, \"Time in s\": 5712.0379140000005 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7349610902952196, \"MicroF1\": 0.7349610902952196, \"MacroF1\": 0.7190161489472059, \"Memory in Mb\": 36.71180248260498, \"Time in s\": 6211.046404000001 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7382314643096713, \"MicroF1\": 0.7382314643096713, \"MacroF1\": 0.7202655895968563, \"Memory in Mb\": 44.65183067321777, \"Time in s\": 6729.485654000001 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7397249895829388, \"MicroF1\": 0.7397249895829386, \"MacroF1\": 0.7198095986730461, \"Memory in Mb\": 54.357375144958496, \"Time in s\": 7266.820082000001 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7418685121107267, \"MicroF1\": 0.7418685121107267, \"MacroF1\": 0.7199133187431289, \"Memory in Mb\": 60.99125003814697, \"Time in s\": 7822.667714000001 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7388727157939041, \"MicroF1\": 0.7388727157939041, \"MacroF1\": 0.7182833957431396, \"Memory in Mb\": 26.944812774658203, \"Time in s\": 8398.518895000001 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7376128792234586, \"MicroF1\": 0.7376128792234586, \"MacroF1\": 0.7214769633664444, \"Memory in Mb\": 24.290247917175293, \"Time in s\": 8990.352305 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7372889658100121, \"MicroF1\": 0.7372889658100121, \"MacroF1\": 0.7255972176885724, \"Memory in Mb\": 19.85909652709961, \"Time in s\": 9598.262147 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7371444805707251, \"MicroF1\": 0.737144480570725, \"MacroF1\": 0.7291466686667684, \"Memory in Mb\": 32.85751724243164, \"Time in s\": 10221.399895 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7374064457003208, \"MicroF1\": 0.7374064457003208, 
\"MacroF1\": 0.7322831246511409, \"Memory in Mb\": 38.75182342529297, \"Time in s\": 10860.215509 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7329170489183511, \"MicroF1\": 0.7329170489183511, \"MacroF1\": 0.7291423789419403, \"Memory in Mb\": 76.79454803466797, \"Time in s\": 11517.947008 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7290728039716475, \"MicroF1\": 0.7290728039716475, \"MacroF1\": 0.7252059051088736, \"Memory in Mb\": 37.93787670135498, \"Time in s\": 12198.376918999998 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.726791633011169, \"MicroF1\": 0.7267916330111689, \"MacroF1\": 0.72277521319889, \"Memory in Mb\": 30.2938232421875, \"Time in s\": 12898.613101 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7233150247571634, \"MicroF1\": 0.7233150247571634, \"MacroF1\": 0.7191521630945247, \"Memory in Mb\": 34.07670021057129, \"Time in s\": 13619.767749 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7210837827173484, \"MicroF1\": 0.7210837827173484, \"MacroF1\": 0.7166085958184295, \"Memory in Mb\": 39.338196754455566, \"Time in s\": 14364.864177 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7203040618361445, \"MicroF1\": 0.7203040618361445, \"MacroF1\": 0.7160627724850469, \"Memory in Mb\": 41.774664878845215, \"Time in s\": 15133.386085 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7183193361078576, \"MicroF1\": 0.7183193361078576, \"MacroF1\": 0.7145670483840382, \"Memory in Mb\": 44.649410247802734, \"Time in s\": 15926.835433 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7176990505791224, \"MicroF1\": 0.7176990505791223, \"MacroF1\": 0.7142800617937591, \"Memory in Mb\": 38.580246925354, \"Time in s\": 16742.181512 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7177489997395772, \"MicroF1\": 0.7177489997395772, \"MacroF1\": 0.7147225222929322, \"Memory in Mb\": 44.2959041595459, \"Time in s\": 17577.282826 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7185818223813374, \"MicroF1\": 0.7185818223813374, \"MacroF1\": 0.7159354160738768, \"Memory in Mb\": 44.74843406677246, \"Time in s\": 18431.175396 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7192171540664246, \"MicroF1\": 0.7192171540664247, \"MacroF1\": 0.7168891106233332, \"Memory in Mb\": 50.63485240936279, \"Time in s\": 19303.296412 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7197128196093113, \"MicroF1\": 0.7197128196093113, 
\"MacroF1\": 0.7173999204613543, \"Memory in Mb\": 48.77041816711426, \"Time in s\": 20195.421521 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7207240169597314, \"MicroF1\": 0.7207240169597314, \"MacroF1\": 0.7184187872009821, \"Memory in Mb\": 56.04546070098877, \"Time in s\": 21107.071122 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7212062543403691, \"MicroF1\": 0.7212062543403692, \"MacroF1\": 0.7191280088329424, \"Memory in Mb\": 48.19489002227783, \"Time in s\": 22037.88723 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7252084405558414, \"MicroF1\": 0.7252084405558414, \"MacroF1\": 0.7232782847500743, \"Memory in Mb\": 54.32844257354736, \"Time in s\": 22988.011007 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7291813584251778, \"MicroF1\": 0.7291813584251778, \"MacroF1\": 0.7271951034706091, \"Memory in Mb\": 53.6518030166626, \"Time in s\": 23956.634362 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7326928009154221, \"MicroF1\": 0.7326928009154221, \"MacroF1\": 0.7304439468758875, \"Memory in Mb\": 27.42653465270996, \"Time in s\": 24940.316756 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7367180101656262, \"MicroF1\": 0.7367180101656263, \"MacroF1\": 0.7341247480346391, \"Memory in Mb\": 36.39958953857422, \"Time in s\": 25936.798718 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Insects\", \"Accuracy\": 0.7395784011060815, \"MicroF1\": 0.7395784011060814, \"MacroF1\": 0.737512125998823, \"Memory in Mb\": 8.341936111450195, \"Time in s\": 26942.262482 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 1.551915168762207, \"Time in s\": 2.576854 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.974233128834356, \"MicroF1\": 0.974233128834356, \"MacroF1\": 0.8747406597440331, \"Memory in Mb\": 4.161267280578613, \"Time in s\": 7.918686 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9672935404742437, \"MicroF1\": 0.9672935404742437, \"MacroF1\": 0.9345378451161834, \"Memory in Mb\": 7.904744148254394, \"Time in s\": 16.17177 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9662783568362968, \"MicroF1\": 0.9662783568362968, \"MacroF1\": 0.920078959712528, \"Memory in Mb\": 12.156608581542969, \"Time in s\": 27.617862 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9632172633643944, \"MicroF1\": 0.9632172633643944, \"MacroF1\": 
0.9392069284616192, \"Memory in Mb\": 18.052184104919437, \"Time in s\": 42.519185 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9591336330200244, \"MicroF1\": 0.9591336330200244, \"MacroF1\": 0.952707267188964, \"Memory in Mb\": 17.44593620300293, \"Time in s\": 61.058317 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9600700525394046, \"MicroF1\": 0.9600700525394046, \"MacroF1\": 0.9487475492194613, \"Memory in Mb\": 23.22895908355713, \"Time in s\": 83.383302 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9589334967821024, \"MicroF1\": 0.9589334967821024, \"MacroF1\": 0.9481804303110768, \"Memory in Mb\": 30.014172554016117, \"Time in s\": 110.001368 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957504767093435, \"MicroF1\": 0.957504767093435, \"MacroF1\": 0.948270905442242, \"Memory in Mb\": 37.68842315673828, \"Time in s\": 141.307336 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9529296396175532, \"MicroF1\": 0.9529296396175532, \"MacroF1\": 0.9350591426916868, \"Memory in Mb\": 43.92499256134033, \"Time in s\": 178.285036 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9558725206151104, \"MicroF1\": 0.9558725206151104, \"MacroF1\": 0.958348874105129, \"Memory in Mb\": 23.460043907165527, \"Time in s\": 220.514164 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957711950970378, \"MicroF1\": 0.957711950970378, \"MacroF1\": 0.9572545884780326, \"Memory in Mb\": 21.815909385681152, \"Time in s\": 267.806579 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9581369036394494, \"MicroF1\": 0.9581369036394494, \"MacroF1\": 0.9564558175945328, \"Memory in Mb\": 29.116984367370605, \"Time in s\": 320.079148 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.959376641568902, \"MicroF1\": 0.959376641568902, \"MacroF1\": 0.9590743474150508, \"Memory in Mb\": 34.215229988098145, \"Time in s\": 377.359359 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9576728223565942, \"MicroF1\": 0.9576728223565942, \"MacroF1\": 0.9540539138154064, \"Memory in Mb\": 42.24546051025391, \"Time in s\": 440.303036 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.957254481385016, \"MicroF1\": 0.9572544813850162, \"MacroF1\": 0.9569914463415944, \"Memory in Mb\": 19.71925640106201, \"Time in s\": 508.401183 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9586157173756308, \"MicroF1\": 0.9586157173756308, \"MacroF1\": 0.9593505106134974, 
\"Memory in Mb\": 22.396859169006348, \"Time in s\": 580.778744 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9592809478414815, \"MicroF1\": 0.9592809478414815, \"MacroF1\": 0.9593459120031488, \"Memory in Mb\": 26.322874069213867, \"Time in s\": 657.980924 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9600051606244356, \"MicroF1\": 0.9600051606244356, \"MacroF1\": 0.9601169971762602, \"Memory in Mb\": 30.259758949279785, \"Time in s\": 740.2744439999999 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.95747027822037, \"MicroF1\": 0.95747027822037, \"MacroF1\": 0.9549133730963548, \"Memory in Mb\": 37.68336868286133, \"Time in s\": 827.6717389999999 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9564608380996849, \"MicroF1\": 0.9564608380996849, \"MacroF1\": 0.9560990529914856, \"Memory in Mb\": 43.737112045288086, \"Time in s\": 921.0092 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9568802228412256, \"MicroF1\": 0.9568802228412256, \"MacroF1\": 0.9569984740230398, \"Memory in Mb\": 33.59728527069092, \"Time in s\": 1020.6233179999998 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9575828626238942, \"MicroF1\": 0.9575828626238942, \"MacroF1\": 0.9578510301970172, \"Memory in Mb\": 34.01332950592041, \"Time in s\": 1125.214274 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9576141354304974, \"MicroF1\": 0.9576141354304974, \"MacroF1\": 0.95758927245962, \"Memory in Mb\": 40.18074893951416, \"Time in s\": 1234.457491 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9558780272575744, \"MicroF1\": 0.9558780272575744, \"MacroF1\": 0.954787839223492, \"Memory in Mb\": 48.97087860107422, \"Time in s\": 1349.23618 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9524842085415292, \"MicroF1\": 0.9524842085415292, \"MacroF1\": 0.9506853107984292, \"Memory in Mb\": 30.6993989944458, \"Time in s\": 1470.124694 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9539718565592374, \"MicroF1\": 0.9539718565592374, \"MacroF1\": 0.9545620457235888, \"Memory in Mb\": 26.549206733703613, \"Time in s\": 1595.074551 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9543902652543116, \"MicroF1\": 0.9543902652543116, \"MacroF1\": 0.9545363240408884, \"Memory in Mb\": 33.6107063293457, \"Time in s\": 1724.543611 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9541881497760122, \"MicroF1\": 0.9541881497760122, \"MacroF1\": 
0.954140840579052, \"Memory in Mb\": 25.182985305786133, \"Time in s\": 1859.013318 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955061688046409, \"MicroF1\": 0.955061688046409, \"MacroF1\": 0.9554321262858616, \"Memory in Mb\": 27.34038543701172, \"Time in s\": 1997.870855 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546928125247094, \"MicroF1\": 0.9546928125247094, \"MacroF1\": 0.9546233453975912, \"Memory in Mb\": 35.30395698547363, \"Time in s\": 2141.6119080000003 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.953887399463807, \"MicroF1\": 0.953887399463807, \"MacroF1\": 0.9537532269202632, \"Memory in Mb\": 33.51621055603027, \"Time in s\": 2290.943904 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9540221347396568, \"MicroF1\": 0.9540221347396568, \"MacroF1\": 0.954138309472004, \"Memory in Mb\": 33.38596153259277, \"Time in s\": 2445.0119170000003 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546535938288516, \"MicroF1\": 0.9546535938288516, \"MacroF1\": 0.9549190485054234, \"Memory in Mb\": 31.36033725738525, \"Time in s\": 2603.685647 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9534281112122698, \"MicroF1\": 0.9534281112122698, \"MacroF1\": 0.9532093226981456, \"Memory in Mb\": 38.61776542663574, \"Time in s\": 2767.1788560000005 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9540409886294, \"MicroF1\": 0.9540409886294, \"MacroF1\": 0.9542688403803362, \"Memory in Mb\": 42.8822660446167, \"Time in s\": 2935.9642900000003 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9547532295462072, \"MicroF1\": 0.9547532295462072, \"MacroF1\": 0.9549723528375392, \"Memory in Mb\": 41.949758529663086, \"Time in s\": 3110.4045410000003 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9549764561697736, \"MicroF1\": 0.9549764561697736, \"MacroF1\": 0.9551012466300322, \"Memory in Mb\": 36.29027271270752, \"Time in s\": 3290.0043080000005 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9551253849538056, \"MicroF1\": 0.9551253849538056, \"MacroF1\": 0.955237279627336, \"Memory in Mb\": 33.26945877075195, \"Time in s\": 3474.8467850000006 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9555119799007292, \"MicroF1\": 0.9555119799007292, \"MacroF1\": 0.9556369370454034, \"Memory in Mb\": 38.47606945037842, \"Time in s\": 3664.7140100000006 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.954923178095295, \"MicroF1\": 0.954923178095295, \"MacroF1\": 0.9549151106032768, \"Memory in Mb\": 38.78229522705078, \"Time in s\": 3859.856695 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955587977823169, \"MicroF1\": 0.955587977823169, \"MacroF1\": 0.9557184838324558, \"Memory in Mb\": 44.56228828430176, \"Time in s\": 4060.267757000001 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9550817990081514, \"MicroF1\": 0.9550817990081514, \"MacroF1\": 0.9550944582439086, \"Memory in Mb\": 49.72221755981445, \"Time in s\": 4266.497535 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9547657512116317, \"MicroF1\": 0.9547657512116317, \"MacroF1\": 0.9547923955213532, \"Memory in Mb\": 44.72002029418945, \"Time in s\": 4478.609232000001 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9553897271093196, \"MicroF1\": 0.9553897271093196, \"MacroF1\": 0.955476322048541, \"Memory in Mb\": 52.00297737121582, \"Time in s\": 4696.983244000001 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.955507006980338, \"MicroF1\": 0.955507006980338, \"MacroF1\": 0.9555572955831596, \"Memory in Mb\": 59.27475929260254, \"Time in s\": 4921.837298000001 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9548891786179922, \"MicroF1\": 0.9548891786179922, \"MacroF1\": 0.9549038695373788, \"Memory in Mb\": 71.70181655883789, \"Time in s\": 5153.196879000001 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.954858806107338, \"MicroF1\": 0.954858806107338, \"MacroF1\": 0.9548865417655428, \"Memory in Mb\": 78.58561515808105, \"Time in s\": 5390.578061000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9539292681706768, \"MicroF1\": 0.9539292681706768, \"MacroF1\": 0.9539347026376764, \"Memory in Mb\": 85.24763870239258, \"Time in s\": 5634.910465000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Streaming Random Patches\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9532330016177264, \"MicroF1\": 0.9532330016177264, \"MacroF1\": 0.9532392337717848, \"Memory in Mb\": 74.55205345153809, \"Time in s\": 5886.477404000001 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5555555555555556, \"MicroF1\": 0.5555555555555556, \"MacroF1\": 0.4458032432860809, \"Memory in Mb\": 0.061410903930664, \"Time in s\": 0.019654 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6483516483516484, \"MicroF1\": 0.6483516483516484, \"MacroF1\": 0.646491610589355, \"Memory in Mb\": 0.1154079437255859, \"Time in s\": 0.0623149999999999 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"ImageSegments\", \"Accuracy\": 0.708029197080292, \"MicroF1\": 0.708029197080292, \"MacroF1\": 0.7216654146545566, \"Memory in Mb\": 0.1258535385131836, \"Time in s\": 0.135768 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7431693989071039, \"MicroF1\": 0.743169398907104, \"MacroF1\": 0.7576794034998369, \"Memory in Mb\": 0.1263303756713867, \"Time in s\": 0.24048 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7641921397379913, \"MicroF1\": 0.7641921397379913, \"MacroF1\": 0.7751275973499576, \"Memory in Mb\": 0.126317024230957, \"Time in s\": 0.376459 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7672727272727272, \"MicroF1\": 0.7672727272727272, \"MacroF1\": 0.7799448750812884, \"Memory in Mb\": 0.1262655258178711, \"Time in s\": 0.543817 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7757009345794392, \"MicroF1\": 0.7757009345794392, \"MacroF1\": 0.781311030606134, \"Memory in Mb\": 0.1267433166503906, \"Time in s\": 0.742198 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.782016348773842, \"MicroF1\": 0.782016348773842, \"MacroF1\": 0.7830988277979799, \"Memory in Mb\": 0.1267471313476562, \"Time in s\": 0.972069 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7893462469733656, \"MicroF1\": 0.7893462469733655, \"MacroF1\": 0.7891834545778567, \"Memory in Mb\": 0.1262693405151367, \"Time in s\": 1.233319 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7799564270152506, \"MicroF1\": 0.7799564270152506, \"MacroF1\": 0.778762654261754, \"Memory in Mb\": 0.1262626647949218, \"Time in s\": 1.526064 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7841584158415842, \"MicroF1\": 0.7841584158415842, \"MacroF1\": 0.7830263284725031, \"Memory in Mb\": 0.126779556274414, \"Time in s\": 1.849975 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7840290381125227, \"MicroF1\": 0.7840290381125228, \"MacroF1\": 0.7833214841514466, \"Memory in Mb\": 0.1267738342285156, \"Time in s\": 2.2053070000000004 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7839195979899497, \"MicroF1\": 0.7839195979899497, \"MacroF1\": 0.7851401823229054, \"Memory in Mb\": 0.1262836456298828, \"Time in s\": 2.5919950000000003 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7884914463452566, \"MicroF1\": 0.7884914463452566, \"MacroF1\": 0.790931132142264, \"Memory in Mb\": 0.1262893676757812, \"Time in s\": 3.010019 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.795355587808418, \"MicroF1\": 0.795355587808418, \"MacroF1\": 0.7973717331367783, \"Memory in Mb\": 0.1267967224121093, \"Time in s\": 3.459217 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7918367346938775, \"MicroF1\": 0.7918367346938775, \"MacroF1\": 0.79371924750244, \"Memory in Mb\": 0.1262922286987304, \"Time in s\": 3.93961 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8015364916773368, \"MicroF1\": 0.8015364916773368, \"MacroF1\": 0.8027236936866887, \"Memory in Mb\": 0.1262769699096679, \"Time in s\": 4.451398 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7980652962515115, \"MicroF1\": 0.7980652962515115, \"MacroF1\": 0.8001612113332863, \"Memory in Mb\": 0.1267776489257812, \"Time in s\": 4.994585 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8041237113402062, \"MicroF1\": 0.8041237113402062, \"MacroF1\": 0.8058476562214167, \"Memory in Mb\": 0.1267652511596679, \"Time in s\": 5.568929 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8063112078346029, \"MicroF1\": 0.8063112078346029, \"MacroF1\": 0.8071524109530731, \"Memory in Mb\": 0.1262378692626953, \"Time in s\": 6.174556 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8072538860103627, \"MicroF1\": 0.8072538860103627, \"MacroF1\": 0.8069383576906736, \"Memory in Mb\": 0.1262502670288086, \"Time in s\": 6.811568 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8120672601384767, \"MicroF1\": 0.8120672601384767, \"MacroF1\": 0.8103691514865562, \"Memory in Mb\": 0.1267623901367187, \"Time in s\": 7.479958 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8117313150425733, \"MicroF1\": 0.8117313150425733, \"MacroF1\": 0.8093057999862455, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 8.179363 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8105167724388033, \"MicroF1\": 0.8105167724388033, \"MacroF1\": 0.8087453181575575, \"Memory in Mb\": 0.126260757446289, \"Time in s\": 8.909729 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8120104438642297, \"MicroF1\": 0.8120104438642298, \"MacroF1\": 0.8093458779132273, \"Memory in Mb\": 0.1267480850219726, \"Time in s\": 9.671618 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8125523012552301, \"MicroF1\": 0.8125523012552303, \"MacroF1\": 0.8098995946687924, \"Memory in Mb\": 0.1267566680908203, \"Time in s\": 10.464471 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8170829975825947, \"MicroF1\": 
0.8170829975825946, \"MacroF1\": 0.8146737825459542, \"Memory in Mb\": 0.1263046264648437, \"Time in s\": 11.288498 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8174048174048174, \"MicroF1\": 0.8174048174048174, \"MacroF1\": 0.8149699191034137, \"Memory in Mb\": 0.1263151168823242, \"Time in s\": 12.143769999999998 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8169542385596399, \"MicroF1\": 0.8169542385596399, \"MacroF1\": 0.8144172630221828, \"Memory in Mb\": 0.1267910003662109, \"Time in s\": 13.030243999999998 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8165337200870196, \"MicroF1\": 0.8165337200870196, \"MacroF1\": 0.8142638589810781, \"Memory in Mb\": 0.1267881393432617, \"Time in s\": 13.947682999999998 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8210526315789474, \"MicroF1\": 0.8210526315789475, \"MacroF1\": 0.8177443463463022, \"Memory in Mb\": 0.1262807846069336, \"Time in s\": 14.896382999999998 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.822569680489463, \"MicroF1\": 0.822569680489463, \"MacroF1\": 0.8180682540474884, \"Memory in Mb\": 0.1267719268798828, \"Time in s\": 15.876335 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8233355306526038, \"MicroF1\": 0.8233355306526038, \"MacroF1\": 0.8183049909694801, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 16.887748 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8227767114523352, \"MicroF1\": 0.8227767114523352, \"MacroF1\": 0.8180063024943973, \"Memory in Mb\": 0.1262645721435547, \"Time in s\": 17.930424 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8228713486637663, \"MicroF1\": 0.8228713486637663, \"MacroF1\": 0.818440484251979, \"Memory in Mb\": 0.1262655258178711, \"Time in s\": 19.004458 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.824773413897281, \"MicroF1\": 0.824773413897281, \"MacroF1\": 0.8207684581521858, \"Memory in Mb\": 0.1267824172973632, \"Time in s\": 20.10985 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.824808935920047, \"MicroF1\": 0.824808935920047, \"MacroF1\": 0.8222541912553749, \"Memory in Mb\": 0.1268024444580078, \"Time in s\": 21.24685 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8259874069834001, \"MicroF1\": 0.8259874069834001, \"MacroF1\": 0.8228660744170171, \"Memory in Mb\": 0.1263065338134765, \"Time in s\": 22.415483 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.826547685443391, \"MicroF1\": 
0.826547685443391, \"MacroF1\": 0.8226613560637924, \"Memory in Mb\": 0.126774787902832, \"Time in s\": 23.615758 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8254486133768353, \"MicroF1\": 0.8254486133768353, \"MacroF1\": 0.8217381124058762, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 24.847279 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8249336870026526, \"MicroF1\": 0.8249336870026526, \"MacroF1\": 0.8216008133499116, \"Memory in Mb\": 0.1262845993041992, \"Time in s\": 26.110083 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8234075608493009, \"MicroF1\": 0.8234075608493009, \"MacroF1\": 0.8193527544316537, \"Memory in Mb\": 0.1262779235839843, \"Time in s\": 27.403915 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8234699038947901, \"MicroF1\": 0.8234699038947901, \"MacroF1\": 0.8195124114516217, \"Memory in Mb\": 0.1267585754394531, \"Time in s\": 28.728915 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8220464656450815, \"MicroF1\": 0.8220464656450815, \"MacroF1\": 0.8172381305352, \"Memory in Mb\": 0.126774787902832, \"Time in s\": 30.084907 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8206863218946351, \"MicroF1\": 0.8206863218946351, \"MacroF1\": 0.8164336862763343, \"Memory in Mb\": 0.1262893676757812, \"Time in s\": 31.472199 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8217494089834515, \"MicroF1\": 0.8217494089834515, \"MacroF1\": 0.8168455585843762, \"Memory in Mb\": 0.1262645721435547, \"Time in s\": 32.891028 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8204534937528922, \"MicroF1\": 0.8204534937528921, \"MacroF1\": 0.8154843900985335, \"Memory in Mb\": 0.1267728805541992, \"Time in s\": 34.340933 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.822383325781604, \"MicroF1\": 0.822383325781604, \"MacroF1\": 0.8171788245797035, \"Memory in Mb\": 0.1262683868408203, \"Time in s\": 35.822171 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.821127385707945, \"MicroF1\": 0.821127385707945, \"MacroF1\": 0.8170261701336431, \"Memory in Mb\": 0.126255989074707, \"Time in s\": 37.335093 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8199217050891692, \"MicroF1\": 0.8199217050891693, \"MacroF1\": 0.8158945802523674, \"Memory in Mb\": 0.1267604827880859, \"Time in s\": 38.879423 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6322274881516587, \"MicroF1\": 0.6322274881516587, \"MacroF1\": 
0.5639948035153092, \"Memory in Mb\": 0.2159481048583984, \"Time in s\": 1.014879 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.636191378493605, \"MicroF1\": 0.636191378493605, \"MacroF1\": 0.5686546251961576, \"Memory in Mb\": 0.2164621353149414, \"Time in s\": 3.11274 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6274076413009156, \"MicroF1\": 0.6274076413009156, \"MacroF1\": 0.5664829980315041, \"Memory in Mb\": 0.2159862518310547, \"Time in s\": 6.352824 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6317783566185177, \"MicroF1\": 0.6317783566185177, \"MacroF1\": 0.5676004628647836, \"Memory in Mb\": 0.216461181640625, \"Time in s\": 10.71472 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6277704110627013, \"MicroF1\": 0.6277704110627013, \"MacroF1\": 0.5651907052085646, \"Memory in Mb\": 0.215977668762207, \"Time in s\": 16.153805 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6244672454617206, \"MicroF1\": 0.6244672454617206, \"MacroF1\": 0.5642758642399058, \"Memory in Mb\": 0.2164630889892578, \"Time in s\": 22.622426 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.621160871329996, \"MicroF1\": 0.621160871329996, \"MacroF1\": 0.5621999618118433, \"Memory in Mb\": 0.2159862518310547, \"Time in s\": 30.095944 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6183260329110927, \"MicroF1\": 0.6183260329110927, \"MacroF1\": 0.560545956984929, \"Memory in Mb\": 0.2164678573608398, \"Time in s\": 38.53514199999999 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6195938124802693, \"MicroF1\": 0.6195938124802693, \"MacroF1\": 0.5612689785887882, \"Memory in Mb\": 0.2159566879272461, \"Time in s\": 47.930055 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6209868358746093, \"MicroF1\": 0.6209868358746093, \"MacroF1\": 0.5626902992589761, \"Memory in Mb\": 0.2164936065673828, \"Time in s\": 58.279528 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6196297890658631, \"MicroF1\": 0.6196297890658631, \"MacroF1\": 0.5618958864151227, \"Memory in Mb\": 0.215947151184082, \"Time in s\": 69.583507 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6235498382132428, \"MicroF1\": 0.6235498382132428, \"MacroF1\": 0.577401509815314, \"Memory in Mb\": 0.2165937423706054, \"Time in s\": 81.841211 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6524368033801996, \"MicroF1\": 0.6524368033801996, \"MacroF1\": 0.656066758247117, \"Memory in Mb\": 0.2161521911621093, \"Time in s\": 95.05061 }, { \"step\": 
14784, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6639383075153893, \"MicroF1\": 0.6639383075153893, \"MacroF1\": 0.6656513873636037, \"Memory in Mb\": 0.2165174484252929, \"Time in s\": 109.208982 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6599532798787803, \"MicroF1\": 0.6599532798787803, \"MacroF1\": 0.6660828271082423, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 124.323768 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6583012725658479, \"MicroF1\": 0.6583012725658479, \"MacroF1\": 0.6678320995738946, \"Memory in Mb\": 0.2164812088012695, \"Time in s\": 140.393226 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6558966074313409, \"MicroF1\": 0.6558966074313409, \"MacroF1\": 0.6676009154715022, \"Memory in Mb\": 0.2160320281982422, \"Time in s\": 157.416689 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6731730415110223, \"MicroF1\": 0.6731730415110223, \"MacroF1\": 0.6774302820037228, \"Memory in Mb\": 0.2165126800537109, \"Time in s\": 175.38966399999998 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6798086029008623, \"MicroF1\": 0.6798086029008623, \"MacroF1\": 0.6780616401383449, \"Memory in Mb\": 0.2158927917480468, \"Time in s\": 194.316978 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.680382593872816, \"MicroF1\": 0.680382593872816, \"MacroF1\": 0.6752117016598617, \"Memory in Mb\": 0.2164011001586914, \"Time in s\": 214.200546 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6805862457722661, \"MicroF1\": 0.6805862457722661, \"MacroF1\": 0.6722568877045599, \"Memory in Mb\": 0.2158823013305664, \"Time in s\": 235.037825 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6813740260858336, \"MicroF1\": 0.6813740260858336, \"MacroF1\": 0.6702824994179433, \"Memory in Mb\": 0.2163906097412109, \"Time in s\": 256.831775 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6815992094536172, \"MicroF1\": 0.6815992094536172, \"MacroF1\": 0.6677450869096582, \"Memory in Mb\": 0.2159175872802734, \"Time in s\": 279.574148 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6821212958213313, \"MicroF1\": 0.6821212958213313, \"MacroF1\": 0.6660355323582295, \"Memory in Mb\": 0.2163667678833007, \"Time in s\": 303.273869 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6831319368157884, \"MicroF1\": 0.6831319368157884, \"MacroF1\": 0.6646803813034555, \"Memory in Mb\": 0.2159061431884765, \"Time in s\": 327.922368 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": 
\"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6836277545073757, \"MicroF1\": 0.6836277545073757, \"MacroF1\": 0.6627124931293528, \"Memory in Mb\": 0.2164020538330078, \"Time in s\": 353.52686 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6834905825821612, \"MicroF1\": 0.6834905825821612, \"MacroF1\": 0.664548122616301, \"Memory in Mb\": 0.2161540985107422, \"Time in s\": 380.080866 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814692055331958, \"MicroF1\": 0.6814692055331958, \"MacroF1\": 0.6671975305669872, \"Memory in Mb\": 0.2166757583618164, \"Time in s\": 407.595828 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6796525487378767, \"MicroF1\": 0.6796525487378767, \"MacroF1\": 0.669471411791397, \"Memory in Mb\": 0.2161798477172851, \"Time in s\": 436.062936 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6779570062186306, \"MicroF1\": 0.6779570062186306, \"MacroF1\": 0.6711290718417154, \"Memory in Mb\": 0.216679573059082, \"Time in s\": 465.4881830000001 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6768901787078051, \"MicroF1\": 0.6768901787078051, \"MacroF1\": 0.6727094382078547, \"Memory in Mb\": 0.2161359786987304, \"Time in s\": 495.86473400000006 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6734337545500281, \"MicroF1\": 0.6734337545500281, \"MacroF1\": 0.6702378074852682, \"Memory in Mb\": 0.2164754867553711, \"Time in s\": 527.2018280000001 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6690676385341636, \"MicroF1\": 0.6690676385341636, \"MacroF1\": 0.6661382581729155, \"Memory in Mb\": 0.215947151184082, \"Time in s\": 559.4897450000001 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6663510013090828, \"MicroF1\": 0.6663510013090828, \"MacroF1\": 0.6633778558128317, \"Memory in Mb\": 0.2165002822875976, \"Time in s\": 592.7366190000001 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.662409697232068, \"MicroF1\": 0.662409697232068, \"MacroF1\": 0.6597878724618786, \"Memory in Mb\": 0.215972900390625, \"Time in s\": 626.9366260000002 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6594239116138366, \"MicroF1\": 0.6594239116138366, \"MacroF1\": 0.6567102170776443, \"Memory in Mb\": 0.2164802551269531, \"Time in s\": 662.1000680000002 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.662409459701569, \"MicroF1\": 0.662409459701569, \"MacroF1\": 0.6591983036871739, \"Memory in Mb\": 0.2159795761108398, \"Time in s\": 698.2204010000002 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6615495800832357, \"MicroF1\": 0.6615495800832357, \"MacroF1\": 0.658372148729009, \"Memory in Mb\": 0.2165098190307617, \"Time in s\": 735.3021140000002 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6616079450258602, \"MicroF1\": 0.6616079450258602, \"MacroF1\": 0.6583203582230679, \"Memory in Mb\": 0.2160120010375976, \"Time in s\": 773.3352610000002 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6620895381045953, \"MicroF1\": 0.6620895381045953, \"MacroF1\": 0.6586855795305535, \"Memory in Mb\": 0.216496467590332, \"Time in s\": 812.3294790000002 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6626862224275321, \"MicroF1\": 0.6626862224275321, \"MacroF1\": 0.6591267371039767, \"Memory in Mb\": 0.216012954711914, \"Time in s\": 852.2739140000002 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6625104281752384, \"MicroF1\": 0.6625104281752384, \"MacroF1\": 0.6587853710847982, \"Memory in Mb\": 0.2164974212646484, \"Time in s\": 893.1794370000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6629374325544519, \"MicroF1\": 0.6629374325544519, \"MacroF1\": 0.6587077344895959, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 935.0370030000004 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6634311172330671, \"MicroF1\": 0.6634311172330671, \"MacroF1\": 0.6587873315408634, \"Memory in Mb\": 0.2165002822875976, \"Time in s\": 977.8531990000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.666217723436941, \"MicroF1\": 0.666217723436941, \"MacroF1\": 0.6621071051846, \"Memory in Mb\": 0.2159204483032226, \"Time in s\": 1021.6188110000004 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6698507462686567, \"MicroF1\": 0.6698507462686567, \"MacroF1\": 0.6663907774790556, \"Memory in Mb\": 0.2164478302001953, \"Time in s\": 1066.3421490000003 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6739940762829683, \"MicroF1\": 0.6739940762829683, \"MacroF1\": 0.6709516060662618, \"Memory in Mb\": 0.2159433364868164, \"Time in s\": 1112.0155100000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6774715410262987, \"MicroF1\": 0.6774715410262987, \"MacroF1\": 0.6745572423992897, \"Memory in Mb\": 0.2164840698242187, \"Time in s\": 1158.6520200000002 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814834856888855, \"MicroF1\": 0.6814834856888855, \"MacroF1\": 0.6786206144243011, \"Memory in Mb\": 0.2159938812255859, \"Time in s\": 1206.2391940000002 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"Insects\", \"Accuracy\": 0.6865470936949564, \"MicroF1\": 0.6865470936949564, \"MacroF1\": 0.6836613373539585, \"Memory in Mb\": 0.2166557312011718, \"Time in s\": 1254.784849 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 0.2092580795288086, \"Time in s\": 0.626636 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828220858895704, \"MicroF1\": 0.9828220858895704, \"MacroF1\": 0.9550926410288756, \"Memory in Mb\": 0.2098121643066406, \"Time in s\": 1.943188 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852820932134096, \"MicroF1\": 0.9852820932134096, \"MacroF1\": 0.9672695079711996, \"Memory in Mb\": 0.2093591690063476, \"Time in s\": 3.654696 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840588595953402, \"MicroF1\": 0.9840588595953402, \"MacroF1\": 0.9604409213604836, \"Memory in Mb\": 0.2098979949951172, \"Time in s\": 5.778702 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984796468857283, \"MicroF1\": 0.984796468857283, \"MacroF1\": 0.9791423790442798, \"Memory in Mb\": 0.2104520797729492, \"Time in s\": 8.327232 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861054352268084, \"MicroF1\": 0.9861054352268084, \"MacroF1\": 0.9837809767868474, \"Memory in Mb\": 0.2099990844726562, \"Time in s\": 11.304767 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9859894921190894, \"MicroF1\": 0.9859894921190894, \"MacroF1\": 0.9813641447908844, \"Memory in Mb\": 0.2105531692504882, \"Time in s\": 14.706237 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9871284094391664, \"MicroF1\": 0.9871284094391664, \"MacroF1\": 0.9868437405314092, \"Memory in Mb\": 0.2106037139892578, \"Time in s\": 18.525325 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9880141650776356, \"MicroF1\": 0.9880141650776356, \"MacroF1\": 0.9878382173613446, \"Memory in Mb\": 0.2101507186889648, \"Time in s\": 22.76123 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9877420936504046, \"MicroF1\": 0.9877420936504046, \"MacroF1\": 0.9857777629944036, \"Memory in Mb\": 0.2107048034667968, \"Time in s\": 27.408752 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9881880989525296, \"MicroF1\": 0.9881880989525296, \"MacroF1\": 0.9878235870948694, \"Memory in Mb\": 0.2102518081665039, \"Time in s\": 32.463238 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9885597548518896, 
\"MicroF1\": 0.9885597548518896, \"MacroF1\": 0.9882962361329112, \"Memory in Mb\": 0.2103023529052734, \"Time in s\": 37.912156 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9867999245709976, \"MicroF1\": 0.9867999245709976, \"MacroF1\": 0.9836140972543967, \"Memory in Mb\": 0.210906982421875, \"Time in s\": 43.748231 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9873927508317284, \"MicroF1\": 0.9873927508317284, \"MacroF1\": 0.9875632488318824, \"Memory in Mb\": 0.210453987121582, \"Time in s\": 49.983611 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9872528190880864, \"MicroF1\": 0.9872528190880864, \"MacroF1\": 0.986679154193125, \"Memory in Mb\": 0.211008071899414, \"Time in s\": 56.586139 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.987130381492263, \"MicroF1\": 0.987130381492263, \"MacroF1\": 0.9866769113371192, \"Memory in Mb\": 0.2110586166381836, \"Time in s\": 63.546493 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9875991348233598, \"MicroF1\": 0.9875991348233598, \"MacroF1\": 0.9877805463370743, \"Memory in Mb\": 0.2106056213378906, \"Time in s\": 70.86485 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.986926324390576, \"MicroF1\": 0.986926324390576, \"MacroF1\": 0.9861386596476128, \"Memory in Mb\": 0.2126245498657226, \"Time in s\": 78.54037100000001 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9865823764675524, \"MicroF1\": 0.9865823764675524, \"MacroF1\": 0.986151116916088, \"Memory in Mb\": 0.2121715545654297, \"Time in s\": 86.582909 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861502635126854, \"MicroF1\": 0.9861502635126854, \"MacroF1\": 0.9857089041873668, \"Memory in Mb\": 0.2122220993041992, \"Time in s\": 94.988236 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9863429438543247, \"MicroF1\": 0.9863429438543247, \"MacroF1\": 0.986382977302644, \"Memory in Mb\": 0.2127761840820312, \"Time in s\": 103.754213 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9865181058495822, \"MicroF1\": 0.9865181058495822, \"MacroF1\": 0.9865643235024212, \"Memory in Mb\": 0.2123231887817382, \"Time in s\": 112.879589 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9858254289672812, \"MicroF1\": 0.9858254289672812, \"MacroF1\": 0.9853734936692788, \"Memory in Mb\": 0.2128772735595703, \"Time in s\": 122.365291 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857011541211316, \"MicroF1\": 0.9857011541211316, \"MacroF1\": 0.9856081881161904, \"Memory 
in Mb\": 0.2129278182983398, \"Time in s\": 132.212374 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9860770663790568, \"MicroF1\": 0.9860770663790568, \"MacroF1\": 0.9862471716083434, \"Memory in Mb\": 0.2124748229980468, \"Time in s\": 142.422221 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.98538700857924, \"MicroF1\": 0.98538700857924, \"MacroF1\": 0.9850628829106896, \"Memory in Mb\": 0.2130289077758789, \"Time in s\": 152.99283200000002 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9855651384475717, \"MicroF1\": 0.9855651384475717, \"MacroF1\": 0.9856470830770891, \"Memory in Mb\": 0.2125759124755859, \"Time in s\": 163.926329 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857305436400244, \"MicroF1\": 0.9857305436400244, \"MacroF1\": 0.9858087969497248, \"Memory in Mb\": 0.2126264572143554, \"Time in s\": 175.222393 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9858845406136422, \"MicroF1\": 0.9858845406136422, \"MacroF1\": 0.9859589489459036, \"Memory in Mb\": 0.2131805419921875, \"Time in s\": 186.880683 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9861099763052537, \"MicroF1\": 0.9861099763052537, \"MacroF1\": 0.9862068987479334, \"Memory in Mb\": 0.2127275466918945, \"Time in s\": 198.905241 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.986241796473472, \"MicroF1\": 0.986241796473472, \"MacroF1\": 0.9863073128720756, \"Memory in Mb\": 0.2132816314697265, \"Time in s\": 211.292323 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.985905783224818, \"MicroF1\": 0.985905783224818, \"MacroF1\": 0.9858386074980298, \"Memory in Mb\": 0.2133321762084961, \"Time in s\": 224.041487 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9857386912278095, \"MicroF1\": 0.9857386912278095, \"MacroF1\": 0.985725098817589, \"Memory in Mb\": 0.2128791809082031, \"Time in s\": 237.153453 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.985725614591594, \"MicroF1\": 0.985725614591594, \"MacroF1\": 0.9857526199764752, \"Memory in Mb\": 0.2134332656860351, \"Time in s\": 250.627393 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9845927585965404, \"MicroF1\": 0.9845927585965404, \"MacroF1\": 0.9843691165759658, \"Memory in Mb\": 0.2129802703857422, \"Time in s\": 264.463365 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9848845918158916, \"MicroF1\": 0.9848845918158916, \"MacroF1\": 0.9849709956409892, \"Memory in Mb\": 0.2130308151245117, \"Time in s\": 
278.661824 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9851606492215964, \"MicroF1\": 0.9851606492215964, \"MacroF1\": 0.9852374033885688, \"Memory in Mb\": 0.2135848999023437, \"Time in s\": 293.222186 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843901180416692, \"MicroF1\": 0.9843901180416692, \"MacroF1\": 0.9842921251481088, \"Memory in Mb\": 0.2131319046020507, \"Time in s\": 308.144297 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840362013701212, \"MicroF1\": 0.9840362013701212, \"MacroF1\": 0.9840127534225096, \"Memory in Mb\": 0.2136859893798828, \"Time in s\": 323.428421 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984067651204118, \"MicroF1\": 0.984067651204118, \"MacroF1\": 0.98409717640125, \"Memory in Mb\": 0.2137365341186523, \"Time in s\": 339.073895 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9838584324744424, \"MicroF1\": 0.9838584324744424, \"MacroF1\": 0.9838587519327452, \"Memory in Mb\": 0.2132835388183593, \"Time in s\": 355.082258 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840676976947768, \"MicroF1\": 0.9840676976947768, \"MacroF1\": 0.9841085979018744, \"Memory in Mb\": 0.2138376235961914, \"Time in s\": 371.45447 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840962207148152, \"MicroF1\": 0.9840962207148152, \"MacroF1\": 0.9841170088782344, \"Memory in Mb\": 0.2133846282958984, \"Time in s\": 388.187765 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840120327558354, \"MicroF1\": 0.9840120327558354, \"MacroF1\": 0.98402212072501, \"Memory in Mb\": 0.2134351730346679, \"Time in s\": 405.281273 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9842039326760716, \"MicroF1\": 0.9842039326760716, \"MacroF1\": 0.9842275892846344, \"Memory in Mb\": 0.2139892578125, \"Time in s\": 422.735746 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984280918633772, \"MicroF1\": 0.984280918633772, \"MacroF1\": 0.9842944297848302, \"Memory in Mb\": 0.213536262512207, \"Time in s\": 440.550971 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843024771838332, \"MicroF1\": 0.9843024771838332, \"MacroF1\": 0.9843104669951572, \"Memory in Mb\": 0.214090347290039, \"Time in s\": 458.72564299999993 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9843742021140786, \"MicroF1\": 0.9843742021140786, \"MacroF1\": 0.9843801024949196, \"Memory in Mb\": 0.2141408920288086, \"Time in s\": 477.2614029999999 }, { \"step\": 19992, \"track\": 
\"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9845430443699664, \"MicroF1\": 0.9845430443699664, \"MacroF1\": 0.984546236206973, \"Memory in Mb\": 0.2136878967285156, \"Time in s\": 496.1581819999999 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.984509044561008, \"MicroF1\": 0.984509044561008, \"MacroF1\": 0.984507607652182, \"Memory in Mb\": 0.2142419815063476, \"Time in s\": 515.4151939999999 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2457649726557289, \"Memory in Mb\": 4.137397766113281, \"Time in s\": 1.064188 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4835164835164835, \"MicroF1\": 0.4835164835164835, \"MacroF1\": 0.4934752395581889, \"Memory in Mb\": 4.140613555908203, \"Time in s\": 2.631663 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5528821792646677, \"Memory in Mb\": 4.140277862548828, \"Time in s\": 4.836076 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5956284153005464, \"MicroF1\": 0.5956284153005464, \"MacroF1\": 0.614143164890895, \"Memory in Mb\": 4.141227722167969, \"Time in s\": 7.573955 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.62882096069869, \"MicroF1\": 0.62882096069869, \"MacroF1\": 0.6441389332893815, \"Memory in Mb\": 3.913887023925781, \"Time in s\": 10.681288 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.64, \"MicroF1\": 0.64, \"MacroF1\": 0.6559607038460422, \"Memory in Mb\": 4.028352737426758, \"Time in s\": 14.131211 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6697819314641744, \"MicroF1\": 0.6697819314641744, \"MacroF1\": 0.6706320385346652, \"Memory in Mb\": 4.144774436950684, \"Time in s\": 17.917192 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6948228882833788, \"MicroF1\": 0.6948228882833788, \"MacroF1\": 0.6897433526546474, \"Memory in Mb\": 4.144762992858887, \"Time in s\": 22.052519 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.711864406779661, \"MicroF1\": 0.711864406779661, \"MacroF1\": 0.706570530482581, \"Memory in Mb\": 4.148934364318848, \"Time in s\": 26.547524 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7145969498910676, \"MicroF1\": 0.7145969498910676, \"MacroF1\": 0.7071122267088653, \"Memory in Mb\": 4.148022651672363, \"Time in s\": 31.388583 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7247524752475247, \"MicroF1\": 
0.7247524752475247, \"MacroF1\": 0.7147973207987898, \"Memory in Mb\": 4.147336006164551, \"Time in s\": 36.558334 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7295825771324864, \"MicroF1\": 0.7295825771324864, \"MacroF1\": 0.7210771168277493, \"Memory in Mb\": 4.147068977355957, \"Time in s\": 42.057708 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7336683417085427, \"MicroF1\": 0.7336683417085426, \"MacroF1\": 0.7250288715672424, \"Memory in Mb\": 4.14684009552002, \"Time in s\": 47.89146 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7325038880248833, \"MicroF1\": 0.7325038880248833, \"MacroF1\": 0.725892488365903, \"Memory in Mb\": 4.150084495544434, \"Time in s\": 54.057887 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.737300435413643, \"MicroF1\": 0.737300435413643, \"MacroF1\": 0.730253637873586, \"Memory in Mb\": 4.149851799011231, \"Time in s\": 60.540489 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387755102040816, \"MicroF1\": 0.7387755102040816, \"MacroF1\": 0.7329631379486717, \"Memory in Mb\": 4.149523735046387, \"Time in s\": 67.34570099999999 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7439180537772087, \"MicroF1\": 0.7439180537772088, \"MacroF1\": 0.7387105187530085, \"Memory in Mb\": 4.149043083190918, \"Time in s\": 74.478308 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7460701330108828, \"MicroF1\": 0.7460701330108827, \"MacroF1\": 0.7425025596154724, \"Memory in Mb\": 4.1487531661987305, \"Time in s\": 81.934305 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7514318442153494, \"MicroF1\": 0.7514318442153494, \"MacroF1\": 0.7467163857842192, \"Memory in Mb\": 4.148730278015137, \"Time in s\": 89.700464 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750816104461371, \"MicroF1\": 0.750816104461371, \"MacroF1\": 0.7453933609147307, \"Memory in Mb\": 4.148531913757324, \"Time in s\": 97.776444 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7512953367875648, \"MicroF1\": 0.7512953367875648, \"MacroF1\": 0.7451117895470661, \"Memory in Mb\": 4.148127555847168, \"Time in s\": 106.157006 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7507418397626113, \"MicroF1\": 0.7507418397626113, \"MacroF1\": 0.7449630804815479, \"Memory in Mb\": 4.147826194763184, \"Time in s\": 114.848603 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7511825922421949, \"MicroF1\": 0.7511825922421949, \"MacroF1\": 0.7446315489945474, \"Memory in Mb\": 4.149008750915527, \"Time in s\": 123.845956 
}, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7533998186763372, \"MicroF1\": 0.7533998186763373, \"MacroF1\": 0.7466082689908061, \"Memory in Mb\": 4.149382591247559, \"Time in s\": 133.146638 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7563098346388164, \"MicroF1\": 0.7563098346388164, \"MacroF1\": 0.7491651771194965, \"Memory in Mb\": 4.148917198181152, \"Time in s\": 142.738156 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7589958158995815, \"MicroF1\": 0.7589958158995815, \"MacroF1\": 0.7526420027035882, \"Memory in Mb\": 4.148730278015137, \"Time in s\": 152.636035 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.75825946817083, \"MicroF1\": 0.7582594681708301, \"MacroF1\": 0.7524016178277559, \"Memory in Mb\": 4.148566246032715, \"Time in s\": 162.845279 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7637917637917638, \"MicroF1\": 0.7637917637917638, \"MacroF1\": 0.75666252908711, \"Memory in Mb\": 4.14877986907959, \"Time in s\": 173.368823 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7636909227306826, \"MicroF1\": 0.7636909227306825, \"MacroF1\": 0.7569484848610158, \"Memory in Mb\": 4.148688316345215, \"Time in s\": 184.200835 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650471356055112, \"MicroF1\": 0.7650471356055112, \"MacroF1\": 0.7590436403579585, \"Memory in Mb\": 4.1487226486206055, \"Time in s\": 195.341139 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.767719298245614, \"MicroF1\": 0.767719298245614, \"MacroF1\": 0.7612112896959209, \"Memory in Mb\": 4.148562431335449, \"Time in s\": 206.790743 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7722637661454793, \"MicroF1\": 0.7722637661454793, \"MacroF1\": 0.7640566966433581, \"Memory in Mb\": 4.148623466491699, \"Time in s\": 218.546701 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7732366512854317, \"MicroF1\": 0.7732366512854317, \"MacroF1\": 0.7642341334147652, \"Memory in Mb\": 4.148673057556152, \"Time in s\": 230.6041 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7735124760076776, \"MicroF1\": 0.7735124760076776, \"MacroF1\": 0.7653316001442942, \"Memory in Mb\": 4.148703575134277, \"Time in s\": 242.961148 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7737725295214419, \"MicroF1\": 0.7737725295214419, \"MacroF1\": 0.7647353044337892, \"Memory in Mb\": 4.148566246032715, \"Time in s\": 255.638001 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7734138972809668, \"MicroF1\": 0.7734138972809667, \"MacroF1\": 0.7645730180903106, \"Memory in Mb\": 4.148055076599121, \"Time in s\": 268.628995 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7724867724867724, \"MicroF1\": 0.7724867724867724, \"MacroF1\": 0.7656182355666586, \"Memory in Mb\": 4.148245811462402, \"Time in s\": 281.916269 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7750429307384087, \"MicroF1\": 0.7750429307384087, \"MacroF1\": 0.7677424040514297, \"Memory in Mb\": 4.148360252380371, \"Time in s\": 295.50082 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7763524818739542, \"MicroF1\": 0.7763524818739542, \"MacroF1\": 0.7677176136548695, \"Memory in Mb\": 4.148287773132324, \"Time in s\": 309.399686 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7775965198477434, \"MicroF1\": 0.7775965198477434, \"MacroF1\": 0.7691578918725354, \"Memory in Mb\": 4.147894859313965, \"Time in s\": 323.61456 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7761273209549071, \"MicroF1\": 0.7761273209549071, \"MacroF1\": 0.7681560201617949, \"Memory in Mb\": 4.147856712341309, \"Time in s\": 338.130858 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762817193164163, \"MicroF1\": 0.7762817193164163, \"MacroF1\": 0.7674170460709654, \"Memory in Mb\": 4.147791862487793, \"Time in s\": 352.957512 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7769347496206374, \"MicroF1\": 0.7769347496206374, \"MacroF1\": 0.7672843880004774, \"Memory in Mb\": 4.147627830505371, \"Time in s\": 368.093168 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7790410281759763, \"MicroF1\": 0.7790410281759763, \"MacroF1\": 0.7681802739952505, \"Memory in Mb\": 4.147582054138184, \"Time in s\": 383.545184 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.778153697438376, \"MicroF1\": 0.7781536974383759, \"MacroF1\": 0.7675304391667319, \"Memory in Mb\": 4.147578239440918, \"Time in s\": 399.300197 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7787234042553192, \"MicroF1\": 0.778723404255319, \"MacroF1\": 0.7673415220519754, \"Memory in Mb\": 4.147555351257324, \"Time in s\": 415.3667640000001 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7797316057380842, \"MicroF1\": 0.7797316057380842, \"MacroF1\": 0.7679341969633587, \"Memory in Mb\": 4.147627830505371, \"Time in s\": 431.738527 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7816039873130947, \"MicroF1\": 0.7816039873130947, 
\"MacroF1\": 0.7687944234581563, \"Memory in Mb\": 4.1476240158081055, \"Time in s\": 448.4359490000001 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7785175321793165, \"MicroF1\": 0.7785175321793165, \"MacroF1\": 0.7657018899401807, \"Memory in Mb\": 4.147597312927246, \"Time in s\": 465.434175 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7777294475859069, \"MicroF1\": 0.7777294475859068, \"MacroF1\": 0.7649119672933201, \"Memory in Mb\": 4.14768123626709, \"Time in s\": 482.736348 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5970323052762561, \"Memory in Mb\": 6.54005241394043, \"Time in s\": 11.482596 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5890580890213498, \"Memory in Mb\": 6.540731430053711, \"Time in s\": 32.930503 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6157246605620461, \"MicroF1\": 0.6157246605620461, \"MacroF1\": 0.5802533923244892, \"Memory in Mb\": 6.541685104370117, \"Time in s\": 64.407359 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6107032914989344, \"MicroF1\": 0.6107032914989344, \"MacroF1\": 0.574850135712032, \"Memory in Mb\": 6.54176139831543, \"Time in s\": 105.889955 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.614889183557492, \"MicroF1\": 0.614889183557492, \"MacroF1\": 0.5777842549225518, \"Memory in Mb\": 6.542509078979492, \"Time in s\": 157.317574 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.608997632202052, \"MicroF1\": 0.608997632202052, \"MacroF1\": 0.5733157350789627, \"Memory in Mb\": 6.541296005249023, \"Time in s\": 218.706259 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6057367068055743, \"MicroF1\": 0.6057367068055743, \"MacroF1\": 0.5703382690867538, \"Memory in Mb\": 6.541265487670898, \"Time in s\": 290.118972 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6069610512608027, \"MicroF1\": 0.6069610512608027, \"MacroF1\": 0.5711427916016896, \"Memory in Mb\": 6.541204452514648, \"Time in s\": 371.535386 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6039145532989583, \"MicroF1\": 0.6039145532989583, \"MacroF1\": 0.5678102867297488, \"Memory in Mb\": 6.541570663452148, \"Time in s\": 462.975881 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6034662373330808, \"MicroF1\": 0.6034662373330808, \"MacroF1\": 0.567425153452482, \"Memory in Mb\": 6.541746139526367, \"Time in s\": 564.403412 }, { \"step\": 11616, \"track\": \"Multiclass classification\", 
\"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6005165733964701, \"MicroF1\": 0.6005165733964701, \"MacroF1\": 0.5651283239572901, \"Memory in Mb\": 6.541906356811523, \"Time in s\": 675.845474 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6031883829216321, \"MicroF1\": 0.6031883829216321, \"MacroF1\": 0.5703828979306639, \"Memory in Mb\": 6.542104721069336, \"Time in s\": 797.3461219999999 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6152108982297662, \"MicroF1\": 0.6152108982297662, \"MacroF1\": 0.5959760515786451, \"Memory in Mb\": 6.026429176330566, \"Time in s\": 928.242385 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6060339579246432, \"MicroF1\": 0.6060339579246432, \"MacroF1\": 0.5869142505177357, \"Memory in Mb\": 6.546758651733398, \"Time in s\": 1068.594912 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5713744554580465, \"MicroF1\": 0.5713744554580465, \"MacroF1\": 0.5537658591956377, \"Memory in Mb\": 6.547109603881836, \"Time in s\": 1218.89566 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.545546019532406, \"MicroF1\": 0.545546019532406, \"MacroF1\": 0.5286479939306437, \"Memory in Mb\": 6.431658744812012, \"Time in s\": 1379.1626680000002 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.526767311013314, \"MicroF1\": 0.526767311013314, \"MacroF1\": 0.509587529402725, \"Memory in Mb\": 6.54762077331543, \"Time in s\": 1549.227957 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.517756615983585, \"MicroF1\": 0.517756615983585, \"MacroF1\": 0.4976462434137419, \"Memory in Mb\": 4.743686676025391, \"Time in s\": 1728.135143 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5296815032647162, \"MicroF1\": 0.5296815032647162, \"MacroF1\": 0.5080882715573688, \"Memory in Mb\": 10.447637557983398, \"Time in s\": 1914.608483 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.539750935176855, \"MicroF1\": 0.539750935176855, \"MacroF1\": 0.5184934777423561, \"Memory in Mb\": 11.000249862670898, \"Time in s\": 2110.852221 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5468771138669674, \"MicroF1\": 0.5468771138669674, \"MacroF1\": 0.5259709774382829, \"Memory in Mb\": 10.998456954956056, \"Time in s\": 2316.603266 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5551633593043778, \"MicroF1\": 0.5551633593043778, \"MacroF1\": 0.5340735310276195, \"Memory in Mb\": 12.317106246948242, \"Time in s\": 2531.7014360000003 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5615761518507844, \"MicroF1\": 0.5615761518507844, \"MacroF1\": 
0.5396852076547555, \"Memory in Mb\": 12.966436386108398, \"Time in s\": 2756.048529 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5679280274632048, \"MicroF1\": 0.5679280274632048, \"MacroF1\": 0.5455634192548012, \"Memory in Mb\": 13.622279167175291, \"Time in s\": 2989.801767 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5727868479866661, \"MicroF1\": 0.5727868479866661, \"MacroF1\": 0.5496374434570931, \"Memory in Mb\": 13.72577953338623, \"Time in s\": 3232.991513 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5754143143325442, \"MicroF1\": 0.5754143143325442, \"MacroF1\": 0.5513680135969626, \"Memory in Mb\": 13.724169731140137, \"Time in s\": 3485.670293 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5772859598049875, \"MicroF1\": 0.5772859598049875, \"MacroF1\": 0.5551350356863173, \"Memory in Mb\": 13.7214994430542, \"Time in s\": 3747.946766 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.577772516657084, \"MicroF1\": 0.577772516657084, \"MacroF1\": 0.559086133229251, \"Memory in Mb\": 13.720248222351074, \"Time in s\": 4020.178573 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.578225516768442, \"MicroF1\": 0.578225516768442, \"MacroF1\": 0.5625516131192055, \"Memory in Mb\": 12.85925006866455, \"Time in s\": 4302.520044 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5795637488557088, \"MicroF1\": 0.5795637488557088, \"MacroF1\": 0.5663363640160618, \"Memory in Mb\": 12.858540534973145, \"Time in s\": 4594.920222 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5811211241790133, \"MicroF1\": 0.5811211241790133, \"MacroF1\": 0.5696723582178382, \"Memory in Mb\": 12.857670783996582, \"Time in s\": 4897.257331 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.575804208221124, \"MicroF1\": 0.575804208221124, \"MacroF1\": 0.5647934119551398, \"Memory in Mb\": 13.070902824401855, \"Time in s\": 5209.806865 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5701495107182828, \"MicroF1\": 0.5701495107182828, \"MacroF1\": 0.559068023359177, \"Memory in Mb\": 13.0708646774292, \"Time in s\": 5532.790806 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5657744478177311, \"MicroF1\": 0.5657744478177311, \"MacroF1\": 0.5542573482740074, \"Memory in Mb\": 13.072970390319824, \"Time in s\": 5866.228602 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5611894261208366, \"MicroF1\": 0.5611894261208366, \"MacroF1\": 0.5493152777162592, \"Memory in Mb\": 13.618464469909668, \"Time in s\": 6210.1901960000005 }, { \"step\": 38016, \"track\": \"Multiclass 
classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.558779429172695, \"MicroF1\": 0.558779429172695, \"MacroF1\": 0.5463982360776033, \"Memory in Mb\": 13.620196342468262, \"Time in s\": 6564.708960000001 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5546825010877633, \"MicroF1\": 0.5546825010877633, \"MacroF1\": 0.5426283860139581, \"Memory in Mb\": 14.406596183776855, \"Time in s\": 6929.528370000001 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5542153662122761, \"MicroF1\": 0.5542153662122761, \"MacroF1\": 0.5429626632180721, \"Memory in Mb\": 15.257904052734377, \"Time in s\": 7304.280479000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5541364155112547, \"MicroF1\": 0.5541364155112547, \"MacroF1\": 0.5435420562964656, \"Memory in Mb\": 15.358447074890137, \"Time in s\": 7688.491709000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5542981604678141, \"MicroF1\": 0.5542981604678141, \"MacroF1\": 0.5443914000180358, \"Memory in Mb\": 15.357035636901855, \"Time in s\": 8082.149982000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.554151749624668, \"MicroF1\": 0.554151749624668, \"MacroF1\": 0.5448486588729108, \"Memory in Mb\": 13.518179893493652, \"Time in s\": 8485.318247000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5536290049829767, \"MicroF1\": 0.5536290049829767, \"MacroF1\": 0.5448029815059025, \"Memory in Mb\": 13.742095947265623, \"Time in s\": 8897.925593000002 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5541436342414165, \"MicroF1\": 0.5541436342414165, \"MacroF1\": 0.5454957405719211, \"Memory in Mb\": 14.286569595336914, \"Time in s\": 9319.685836000002 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5553020683124207, \"MicroF1\": 0.5553020683124207, \"MacroF1\": 0.546961663735647, \"Memory in Mb\": 15.266200065612791, \"Time in s\": 9750.326660000002 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5579662871693428, \"MicroF1\": 0.5579662871693428, \"MacroF1\": 0.5498636684303295, \"Memory in Mb\": 14.323851585388184, \"Time in s\": 10190.236686000002 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5627586206896552, \"MicroF1\": 0.5627586206896552, \"MacroF1\": 0.5545030394801858, \"Memory in Mb\": 14.950955390930176, \"Time in s\": 10639.519510000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5677701436602124, \"MicroF1\": 0.5677701436602124, \"MacroF1\": 0.5591808574875289, \"Memory in Mb\": 15.350643157958984, \"Time in s\": 11098.085262000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": 
\"Insects\", \"Accuracy\": 0.5730463432438297, \"MicroF1\": 0.5730463432438297, \"MacroF1\": 0.5639878919164368, \"Memory in Mb\": 16.015583038330078, \"Time in s\": 11565.627356000005 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5791894555785324, \"MicroF1\": 0.5791894555785324, \"MacroF1\": 0.5695807960578061, \"Memory in Mb\": 16.33325481414795, \"Time in s\": 12041.828478000005 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5794238527244834, \"MicroF1\": 0.5794238527244834, \"MacroF1\": 0.5701364277094956, \"Memory in Mb\": 15.444610595703123, \"Time in s\": 12525.893185000004 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.2430334091186523, \"Time in s\": 1.187339 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411042944785276, \"MicroF1\": 0.9411042944785276, \"MacroF1\": 0.7377235942917068, \"Memory in Mb\": 3.19038200378418, \"Time in s\": 4.698555 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8879803761242846, \"MicroF1\": 0.8879803761242846, \"MacroF1\": 0.873420796574987, \"Memory in Mb\": 4.134529113769531, \"Time in s\": 10.466629 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8988350705088902, \"MicroF1\": 0.8988350705088902, \"MacroF1\": 0.8792834531664682, \"Memory in Mb\": 5.086630821228027, \"Time in s\": 18.824542 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8950465914664051, \"MicroF1\": 0.8950465914664051, \"MacroF1\": 0.8828407845486113, \"Memory in Mb\": 6.147420883178711, \"Time in s\": 30.316821 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.856559051900286, \"MicroF1\": 0.856559051900286, \"MacroF1\": 0.8543242501248514, \"Memory in Mb\": 6.513773918151856, \"Time in s\": 45.490026 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8640980735551663, \"MicroF1\": 0.8640980735551663, \"MacroF1\": 0.8525227127090282, \"Memory in Mb\": 7.461577415466309, \"Time in s\": 64.600537 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.855654305853509, \"MicroF1\": 0.855654305853509, \"MacroF1\": 0.8307453339686874, \"Memory in Mb\": 8.407819747924805, \"Time in s\": 88.28106700000001 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8469081994007083, \"MicroF1\": 0.8469081994007084, \"MacroF1\": 0.8445950801753395, \"Memory in Mb\": 9.06855297088623, \"Time in s\": 117.068452 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.839911743074283, \"MicroF1\": 0.839911743074283, \"MacroF1\": 0.8273018519986841, \"Memory in Mb\": 
10.241823196411133, \"Time in s\": 151.440392 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8279474036104302, \"MicroF1\": 0.8279474036104302, \"MacroF1\": 0.8381848634946416, \"Memory in Mb\": 11.187081336975098, \"Time in s\": 191.919368 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8294177732379979, \"MicroF1\": 0.8294177732379979, \"MacroF1\": 0.8370944525285466, \"Memory in Mb\": 8.72095775604248, \"Time in s\": 237.683057 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.832736187063926, \"MicroF1\": 0.832736187063926, \"MacroF1\": 0.8304665020850452, \"Memory in Mb\": 9.573864936828612, \"Time in s\": 288.722663 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8254246191560147, \"MicroF1\": 0.8254246191560147, \"MacroF1\": 0.8318293629616008, \"Memory in Mb\": 10.363824844360352, \"Time in s\": 345.486273 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8238274227815002, \"MicroF1\": 0.8238274227815002, \"MacroF1\": 0.8134447828524414, \"Memory in Mb\": 11.40614128112793, \"Time in s\": 408.45138 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8043511567335683, \"MicroF1\": 0.8043511567335683, \"MacroF1\": 0.8054460603633147, \"Memory in Mb\": 12.15697956085205, \"Time in s\": 478.205713 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8005767844268205, \"MicroF1\": 0.8005767844268206, \"MacroF1\": 0.8067791986535922, \"Memory in Mb\": 11.58870792388916, \"Time in s\": 555.142755 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081165736075173, \"MicroF1\": 0.8081165736075173, \"MacroF1\": 0.8106639227074198, \"Memory in Mb\": 12.00939655303955, \"Time in s\": 638.5654069999999 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8097019739388466, \"MicroF1\": 0.8097019739388466, \"MacroF1\": 0.8127585051729247, \"Memory in Mb\": 13.156713485717772, \"Time in s\": 728.9317779999999 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8134575315602403, \"MicroF1\": 0.8134575315602401, \"MacroF1\": 0.8148392057777913, \"Memory in Mb\": 13.88283348083496, \"Time in s\": 826.9412199999999 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.817672464106455, \"MicroF1\": 0.817672464106455, \"MacroF1\": 0.8208026583224199, \"Memory in Mb\": 15.191060066223145, \"Time in s\": 933.116233 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8213927576601672, \"MicroF1\": 0.8213927576601672, \"MacroF1\": 0.8243856825821874, \"Memory in Mb\": 16.25563907623291, \"Time in s\": 1048.180156 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": 
\"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8219119684535863, \"MicroF1\": 0.8219119684535864, \"MacroF1\": 0.8243183344026902, \"Memory in Mb\": 17.071918487548828, \"Time in s\": 1172.5966959999998 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8224900418751915, \"MicroF1\": 0.8224900418751915, \"MacroF1\": 0.8248306232761192, \"Memory in Mb\": 18.233381271362305, \"Time in s\": 1306.7477949999998 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.819197960584371, \"MicroF1\": 0.819197960584371, \"MacroF1\": 0.8170259665463304, \"Memory in Mb\": 19.36789894104004, \"Time in s\": 1451.4292829999995 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.805788630149901, \"MicroF1\": 0.8057886301499011, \"MacroF1\": 0.8022367569175978, \"Memory in Mb\": 20.506345748901367, \"Time in s\": 1607.1296199999997 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.802088061733999, \"MicroF1\": 0.8020880617339992, \"MacroF1\": 0.8038074645550285, \"Memory in Mb\": 19.16464138031006, \"Time in s\": 1773.9352609999996 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8018909218243894, \"MicroF1\": 0.8018909218243894, \"MacroF1\": 0.8005729972530424, \"Memory in Mb\": 19.133995056152344, \"Time in s\": 1951.246828 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8002704758684811, \"MicroF1\": 0.800270475868481, \"MacroF1\": 0.8004166941842216, \"Memory in Mb\": 20.079543113708496, \"Time in s\": 2139.48669 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8035787237519405, \"MicroF1\": 0.8035787237519405, \"MacroF1\": 0.8060123607032721, \"Memory in Mb\": 17.827110290527344, \"Time in s\": 2338.9088759999995 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8088084130623864, \"MicroF1\": 0.8088084130623864, \"MacroF1\": 0.8108606005777994, \"Memory in Mb\": 15.629319190979004, \"Time in s\": 2547.8446989999998 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8079662964381463, \"MicroF1\": 0.8079662964381463, \"MacroF1\": 0.8077709771623751, \"Memory in Mb\": 16.39698314666748, \"Time in s\": 2766.5130659999995 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8068038327267325, \"MicroF1\": 0.8068038327267325, \"MacroF1\": 0.807905549135964, \"Memory in Mb\": 17.24907112121582, \"Time in s\": 2995.4784789999994 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.810107418354841, \"MicroF1\": 0.810107418354841, \"MacroF1\": 0.8115061911206084, \"Memory in Mb\": 18.023069381713867, \"Time in s\": 3234.9291429999994 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.813432313187198, \"MicroF1\": 0.813432313187198, \"MacroF1\": 0.814709519180665, \"Memory in Mb\": 19.25601577758789, \"Time in s\": 3485.414837 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8169810036086335, \"MicroF1\": 0.8169810036086335, \"MacroF1\": 0.8183348126971706, \"Memory in Mb\": 19.79203414916992, \"Time in s\": 3747.476331 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8210665783371978, \"MicroF1\": 0.8210665783371978, \"MacroF1\": 0.8224533109934684, \"Memory in Mb\": 20.76410961151123, \"Time in s\": 4021.7266099999993 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8227439850351544, \"MicroF1\": 0.8227439850351544, \"MacroF1\": 0.8236860076332361, \"Memory in Mb\": 21.70703220367432, \"Time in s\": 4308.908110999999 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8177361573754006, \"MicroF1\": 0.8177361573754006, \"MacroF1\": 0.8170714187961161, \"Memory in Mb\": 22.97433376312256, \"Time in s\": 4609.388637999999 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8135915190881794, \"MicroF1\": 0.8135915190881794, \"MacroF1\": 0.8136474897036394, \"Memory in Mb\": 23.700613975524902, \"Time in s\": 4923.859395999999 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8133556525378132, \"MicroF1\": 0.8133556525378132, \"MacroF1\": 0.8142218072403056, \"Memory in Mb\": 24.764389038085938, \"Time in s\": 5252.522612999999 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8092792529909542, \"MicroF1\": 0.8092792529909542, \"MacroF1\": 0.8090411402278314, \"Memory in Mb\": 26.198601722717285, \"Time in s\": 5595.909021 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8062475061278003, \"MicroF1\": 0.8062475061278003, \"MacroF1\": 0.8065701979489333, \"Memory in Mb\": 25.60452175140381, \"Time in s\": 5954.543495 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8078101498523759, \"MicroF1\": 0.8078101498523759, \"MacroF1\": 0.8084559739072698, \"Memory in Mb\": 26.552149772644043, \"Time in s\": 6328.536349 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8103927229151915, \"MicroF1\": 0.8103927229151915, \"MacroF1\": 0.8111272646261444, \"Memory in Mb\": 27.50138759613037, \"Time in s\": 6718.347567 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.813022859274258, \"MicroF1\": 0.813022859274258, \"MacroF1\": 0.8138485204649677, \"Memory in Mb\": 28.353083610534668, \"Time in s\": 7124.657721 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8090743155149935, \"MicroF1\": 0.8090743155149935, \"MacroF1\": 
0.8093701596568051, \"Memory in Mb\": 29.32584285736084, \"Time in s\": 7547.838048 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8110606137976817, \"MicroF1\": 0.8110606137976817, \"MacroF1\": 0.8116953495842238, \"Memory in Mb\": 30.33370780944824, \"Time in s\": 7988.342591 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8081136511430144, \"MicroF1\": 0.8081136511430144, \"MacroF1\": 0.8084718836746521, \"Memory in Mb\": 31.28043556213379, \"Time in s\": 8446.728501 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"ADWIN Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8058238148928869, \"MicroF1\": 0.805823814892887, \"MacroF1\": 0.8062504565207905, \"Memory in Mb\": 32.1812219619751, \"Time in s\": 8923.60629 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1111111111111111, \"MicroF1\": 0.1111111111111111, \"MacroF1\": 0.0815018315018315, \"Memory in Mb\": 3.44619369506836, \"Time in s\": 0.804099 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.2307692307692307, \"MicroF1\": 0.2307692307692307, \"MacroF1\": 0.2226391771283412, \"Memory in Mb\": 4.129319190979004, \"Time in s\": 2.027411 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4233576642335766, \"MicroF1\": 0.4233576642335766, \"MacroF1\": 0.4463537718619156, \"Memory in Mb\": 4.129193305969238, \"Time in s\": 3.599985 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5355191256830601, \"MicroF1\": 0.5355191256830601, \"MacroF1\": 0.5617062146473911, \"Memory in Mb\": 4.129368782043457, \"Time in s\": 5.452412 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5938864628820961, \"MicroF1\": 0.5938864628820961, \"MacroF1\": 0.6236530662596055, \"Memory in Mb\": 4.12935733795166, \"Time in s\": 7.651963 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6290909090909091, \"MicroF1\": 0.6290909090909091, \"MacroF1\": 0.6558170665459355, \"Memory in Mb\": 4.129300117492676, \"Time in s\": 10.207424 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.660436137071651, \"MicroF1\": 0.660436137071651, \"MacroF1\": 0.6785747202615152, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 13.08877 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6920980926430518, \"MicroF1\": 0.6920980926430518, \"MacroF1\": 0.7041680355881775, \"Memory in Mb\": 4.12868595123291, \"Time in s\": 16.291428 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7167070217917676, \"MicroF1\": 0.7167070217917676, \"MacroF1\": 0.7259075149442815, \"Memory in Mb\": 4.128170967102051, \"Time in s\": 19.832325 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7254901960784313, \"MicroF1\": 0.7254901960784313, \"MacroF1\": 0.732501171084948, \"Memory in Mb\": 4.128491401672363, \"Time in s\": 23.76135 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7386138613861386, \"MicroF1\": 0.7386138613861386, \"MacroF1\": 0.7428621938273078, \"Memory in Mb\": 4.128743171691895, \"Time in s\": 28.024553 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7422867513611615, \"MicroF1\": 0.7422867513611615, \"MacroF1\": 0.7453719085253248, \"Memory in Mb\": 4.128548622131348, \"Time in s\": 32.646215 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7487437185929648, \"MicroF1\": 0.7487437185929648, \"MacroF1\": 0.7504522188790484, \"Memory in Mb\": 4.128659248352051, \"Time in s\": 37.596323 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7465007776049767, \"MicroF1\": 0.7465007776049767, \"MacroF1\": 0.7482323503576439, \"Memory in Mb\": 4.128731727600098, \"Time in s\": 42.92857 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7489114658925979, \"MicroF1\": 0.748911465892598, \"MacroF1\": 0.7488472102580619, \"Memory in Mb\": 4.128785133361816, \"Time in s\": 48.576103 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7523809523809524, \"MicroF1\": 0.7523809523809524, \"MacroF1\": 0.75182837230991, \"Memory in Mb\": 4.1286211013793945, \"Time in s\": 54.551686 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7541613316261203, \"MicroF1\": 0.7541613316261204, \"MacroF1\": 0.7531089046321313, \"Memory in Mb\": 4.128552436828613, \"Time in s\": 60.838379 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7557436517533253, \"MicroF1\": 0.7557436517533253, \"MacroF1\": 0.7552013614952863, \"Memory in Mb\": 4.128499031066895, \"Time in s\": 67.464563 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7617411225658648, \"MicroF1\": 0.7617411225658649, \"MacroF1\": 0.7601066395856337, \"Memory in Mb\": 4.128571510314941, \"Time in s\": 74.378096 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.763873775843308, \"MicroF1\": 0.763873775843308, \"MacroF1\": 0.7623480483274478, \"Memory in Mb\": 4.1285905838012695, \"Time in s\": 81.591089 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7678756476683938, \"MicroF1\": 0.7678756476683938, \"MacroF1\": 0.7646598072570266, \"Memory in Mb\": 4.128613471984863, \"Time in s\": 89.10581599999999 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7705242334322453, \"MicroF1\": 0.7705242334322453, \"MacroF1\": 0.7668271197983112, \"Memory in Mb\": 4.128720283508301, \"Time in s\": 
96.930863 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7757805108798487, \"MicroF1\": 0.7757805108798487, \"MacroF1\": 0.7714920336037776, \"Memory in Mb\": 4.128579139709473, \"Time in s\": 105.051956 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7760652765185857, \"MicroF1\": 0.7760652765185856, \"MacroF1\": 0.7719206139767609, \"Memory in Mb\": 4.128727912902832, \"Time in s\": 113.491292 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7789382071366405, \"MicroF1\": 0.7789382071366405, \"MacroF1\": 0.7750313949659529, \"Memory in Mb\": 4.128632545471191, \"Time in s\": 122.217257 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7849372384937239, \"MicroF1\": 0.7849372384937239, \"MacroF1\": 0.782000389047251, \"Memory in Mb\": 4.128678321838379, \"Time in s\": 131.239072 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7856567284448026, \"MicroF1\": 0.7856567284448026, \"MacroF1\": 0.7827470902102025, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 140.604336 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7894327894327894, \"MicroF1\": 0.7894327894327894, \"MacroF1\": 0.785982924599392, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 150.25346299999998 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7906976744186046, \"MicroF1\": 0.7906976744186046, \"MacroF1\": 0.7876424482584368, \"Memory in Mb\": 4.128628730773926, \"Time in s\": 160.232262 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7933284989122552, \"MicroF1\": 0.7933284989122552, \"MacroF1\": 0.7906471924204203, \"Memory in Mb\": 4.128582954406738, \"Time in s\": 170.496442 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7978947368421052, \"MicroF1\": 0.7978947368421052, \"MacroF1\": 0.7945020166797493, \"Memory in Mb\": 4.128670692443848, \"Time in s\": 181.024488 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8028552005438477, \"MicroF1\": 0.8028552005438477, \"MacroF1\": 0.7982243751921435, \"Memory in Mb\": 4.128663063049316, \"Time in s\": 191.820653 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8035596572181938, \"MicroF1\": 0.8035596572181938, \"MacroF1\": 0.7981876534181911, \"Memory in Mb\": 4.1286821365356445, \"Time in s\": 202.942587 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8035828534868842, \"MicroF1\": 0.8035828534868842, \"MacroF1\": 0.798634974540431, \"Memory in Mb\": 4.128708839416504, \"Time in s\": 214.370001 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 
0.8048477315102548, \"MicroF1\": 0.8048477315102549, \"MacroF1\": 0.7997380784882049, \"Memory in Mb\": 4.128571510314941, \"Time in s\": 226.09596 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8066465256797583, \"MicroF1\": 0.8066465256797583, \"MacroF1\": 0.80161945439383, \"Memory in Mb\": 4.128567695617676, \"Time in s\": 238.115396 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8059964726631393, \"MicroF1\": 0.8059964726631393, \"MacroF1\": 0.8024858564723996, \"Memory in Mb\": 4.128705024719238, \"Time in s\": 250.457722 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8070978820835718, \"MicroF1\": 0.8070978820835718, \"MacroF1\": 0.8029124203507954, \"Memory in Mb\": 4.128613471984863, \"Time in s\": 263.107237 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8081427774679308, \"MicroF1\": 0.8081427774679307, \"MacroF1\": 0.8029834045630978, \"Memory in Mb\": 4.12865161895752, \"Time in s\": 276.028168 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8069603045133225, \"MicroF1\": 0.8069603045133223, \"MacroF1\": 0.8019276227162541, \"Memory in Mb\": 4.128785133361816, \"Time in s\": 289.253241 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8053050397877984, \"MicroF1\": 0.8053050397877984, \"MacroF1\": 0.8006727596367826, \"Memory in Mb\": 4.1285905838012695, \"Time in s\": 302.793979 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8047643707923355, \"MicroF1\": 0.8047643707923355, \"MacroF1\": 0.7995493059800364, \"Memory in Mb\": 4.128586769104004, \"Time in s\": 316.637791 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8057663125948407, \"MicroF1\": 0.8057663125948407, \"MacroF1\": 0.8003960406612561, \"Memory in Mb\": 4.12862491607666, \"Time in s\": 330.791782 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8072170044488384, \"MicroF1\": 0.8072170044488384, \"MacroF1\": 0.8005625942078284, \"Memory in Mb\": 4.1286211013793945, \"Time in s\": 345.23827800000004 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8066698888351861, \"MicroF1\": 0.8066698888351861, \"MacroF1\": 0.8002110568368, \"Memory in Mb\": 4.128506660461426, \"Time in s\": 360.0031 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.807565011820331, \"MicroF1\": 0.807565011820331, \"MacroF1\": 0.8005131307885663, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 375.059377 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8079592781119852, \"MicroF1\": 0.8079592781119852, \"MacroF1\": 0.8006755955605838, \"Memory in Mb\": 4.128510475158691, \"Time in s\": 390.39582400000006 }, 
{ \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8087902129587675, \"MicroF1\": 0.8087902129587675, \"MacroF1\": 0.8009921695193861, \"Memory in Mb\": 4.128510475158691, \"Time in s\": 405.9976 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8060363959165557, \"MicroF1\": 0.8060363959165557, \"MacroF1\": 0.7987732120640717, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 421.95304 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8051326663766856, \"MicroF1\": 0.8051326663766856, \"MacroF1\": 0.7980778928096751, \"Memory in Mb\": 4.128533363342285, \"Time in s\": 438.2190000000001 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5992691812827112, \"Memory in Mb\": 6.522543907165527, \"Time in s\": 11.237994 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6110847939365229, \"MicroF1\": 0.6110847939365229, \"MacroF1\": 0.5773210074897359, \"Memory in Mb\": 6.522406578063965, \"Time in s\": 32.531305 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6043574360593622, \"MicroF1\": 0.6043574360593622, \"MacroF1\": 0.5704368753709179, \"Memory in Mb\": 6.521971702575684, \"Time in s\": 63.87417000000001 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6014681506038362, \"MicroF1\": 0.6014681506038362, \"MacroF1\": 0.5676969561642587, \"Memory in Mb\": 6.521697044372559, \"Time in s\": 105.215764 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6057965523773442, \"MicroF1\": 0.6057965523773442, \"MacroF1\": 0.5710016183775801, \"Memory in Mb\": 6.521697044372559, \"Time in s\": 156.441097 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5966850828729282, \"MicroF1\": 0.5966850828729282, \"MacroF1\": 0.5635903588556204, \"Memory in Mb\": 6.521857261657715, \"Time in s\": 217.680264 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5957245298335814, \"MicroF1\": 0.5957245298335814, \"MacroF1\": 0.5625002603439991, \"Memory in Mb\": 6.52231502532959, \"Time in s\": 288.908384 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5982005445720374, \"MicroF1\": 0.5982005445720374, \"MacroF1\": 0.5646892369665863, \"Memory in Mb\": 6.522658348083496, \"Time in s\": 370.082682 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.596337998526781, \"MicroF1\": 0.596337998526781, \"MacroF1\": 0.5627085514562804, \"Memory in Mb\": 6.523001670837402, \"Time in s\": 461.259323 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5965527038545316, \"MicroF1\": 0.5965527038545316, \"MacroF1\": 
0.5631320282838163, \"Memory in Mb\": 6.523184776306152, \"Time in s\": 562.3727710000001 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5953508394317693, \"MicroF1\": 0.5953508394317693, \"MacroF1\": 0.562671447170627, \"Memory in Mb\": 6.523184776306152, \"Time in s\": 673.482974 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5979796385447084, \"MicroF1\": 0.5979796385447084, \"MacroF1\": 0.5680559575776837, \"Memory in Mb\": 6.522841453552246, \"Time in s\": 794.598712 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.610767101333139, \"MicroF1\": 0.610767101333139, \"MacroF1\": 0.5941277335666079, \"Memory in Mb\": 6.522337913513184, \"Time in s\": 925.382156 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.6019752418318338, \"MicroF1\": 0.6019752418318338, \"MacroF1\": 0.5851264744797858, \"Memory in Mb\": 6.522246360778809, \"Time in s\": 1066.407983 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5705536965717533, \"MicroF1\": 0.5705536965717533, \"MacroF1\": 0.5545059657048704, \"Memory in Mb\": 6.522475242614746, \"Time in s\": 1217.74448 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.548091151228174, \"MicroF1\": 0.548091151228174, \"MacroF1\": 0.5320735507355622, \"Memory in Mb\": 6.522887229919434, \"Time in s\": 1378.917078 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5307225224221492, \"MicroF1\": 0.5307225224221492, \"MacroF1\": 0.5138536287616571, \"Memory in Mb\": 6.523138999938965, \"Time in s\": 1549.794627 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5182827379386542, \"MicroF1\": 0.5182827379386542, \"MacroF1\": 0.4990809738484312, \"Memory in Mb\": 6.523367881774902, \"Time in s\": 1730.586936 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5182176145142801, \"MicroF1\": 0.5182176145142801, \"MacroF1\": 0.497867701567998, \"Memory in Mb\": 8.711265563964844, \"Time in s\": 1921.389763 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5272503432927695, \"MicroF1\": 0.5272503432927695, \"MacroF1\": 0.5067114684709674, \"Memory in Mb\": 15.55071258544922, \"Time in s\": 2121.77156 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.533032694475761, \"MicroF1\": 0.533032694475761, \"MacroF1\": 0.5127471323280748, \"Memory in Mb\": 16.9340763092041, \"Time in s\": 2331.759567 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5410442942619775, \"MicroF1\": 0.5410442942619775, \"MacroF1\": 0.5207771198745245, \"Memory in Mb\": 17.15799903869629, \"Time in s\": 2551.0181620000003 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", 
\"Accuracy\": 0.5459710956478775, \"MicroF1\": 0.5459710956478775, \"MacroF1\": 0.5251711652768186, \"Memory in Mb\": 17.155046463012695, \"Time in s\": 2778.8449820000005 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5532099593576135, \"MicroF1\": 0.5532099593576135, \"MacroF1\": 0.5314216535856217, \"Memory in Mb\": 17.15360450744629, \"Time in s\": 3015.0560080000005 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5607788173794462, \"MicroF1\": 0.5607788173794462, \"MacroF1\": 0.5375130024626694, \"Memory in Mb\": 17.265982627868652, \"Time in s\": 3259.2678140000003 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5667091604443635, \"MicroF1\": 0.5667091604443635, \"MacroF1\": 0.5418496825562071, \"Memory in Mb\": 17.378803253173828, \"Time in s\": 3511.9238520000004 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5692890463329943, \"MicroF1\": 0.5692890463329943, \"MacroF1\": 0.5455529487931667, \"Memory in Mb\": 17.379924774169922, \"Time in s\": 3773.544178 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5688436432509216, \"MicroF1\": 0.5688436432509216, \"MacroF1\": 0.5481992899375988, \"Memory in Mb\": 17.380359649658203, \"Time in s\": 4045.250953 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5687228553701467, \"MicroF1\": 0.5687228553701467, \"MacroF1\": 0.5505043481720591, \"Memory in Mb\": 17.380290985107422, \"Time in s\": 4327.317909 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5691467533697402, \"MicroF1\": 0.5691467533697402, \"MacroF1\": 0.5529220328647554, \"Memory in Mb\": 17.37978744506836, \"Time in s\": 4619.673811000001 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5703986558729189, \"MicroF1\": 0.5703986558729189, \"MacroF1\": 0.5556828084411201, \"Memory in Mb\": 17.37948989868164, \"Time in s\": 4922.186926 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5650025154626972, \"MicroF1\": 0.5650025154626972, \"MacroF1\": 0.5507695387439543, \"Memory in Mb\": 17.604686737060547, \"Time in s\": 5235.061695 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5587568513788849, \"MicroF1\": 0.5587568513788849, \"MacroF1\": 0.5445559443415654, \"Memory in Mb\": 18.05030918121338, \"Time in s\": 5558.17951 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.554215525165028, \"MicroF1\": 0.554215525165028, \"MacroF1\": 0.5396701176441828, \"Memory in Mb\": 18.150463104248047, \"Time in s\": 5891.808229 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5490408290267594, \"MicroF1\": 0.5490408290267594, \"MacroF1\": 0.5342475234810463, \"Memory in Mb\": 19.505443572998047, \"Time in s\": 
6235.988455000001 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5476522425358411, \"MicroF1\": 0.5476522425358411, \"MacroF1\": 0.5324130893403342, \"Memory in Mb\": 21.324423789978027, \"Time in s\": 6590.662011 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5427810908346344, \"MicroF1\": 0.5427810908346344, \"MacroF1\": 0.5280992603544316, \"Memory in Mb\": 22.076537132263184, \"Time in s\": 6955.865912 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5417300072270541, \"MicroF1\": 0.5417300072270541, \"MacroF1\": 0.5282649533846114, \"Memory in Mb\": 22.738471031188965, \"Time in s\": 7330.794870000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5417283830706845, \"MicroF1\": 0.5417283830706845, \"MacroF1\": 0.5295529576867488, \"Memory in Mb\": 23.16494464874268, \"Time in s\": 7714.553666000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5419635881531286, \"MicroF1\": 0.5419635881531286, \"MacroF1\": 0.5308394560628455, \"Memory in Mb\": 23.606464385986328, \"Time in s\": 8107.062243 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5438734264926666, \"MicroF1\": 0.5438734264926666, \"MacroF1\": 0.5334569208328087, \"Memory in Mb\": 23.706249237060547, \"Time in s\": 8507.305821 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5453315596040675, \"MicroF1\": 0.5453315596040675, \"MacroF1\": 0.5354029875943346, \"Memory in Mb\": 24.299342155456543, \"Time in s\": 8915.303832 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.547140308762966, \"MicroF1\": 0.547140308762966, \"MacroF1\": 0.5374156745075451, \"Memory in Mb\": 24.818781852722168, \"Time in s\": 9330.915411 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5492327228116997, \"MicroF1\": 0.5492327228116997, \"MacroF1\": 0.5397202270950943, \"Memory in Mb\": 25.030532836914062, \"Time in s\": 9754.309095 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5481596834950231, \"MicroF1\": 0.5481596834950231, \"MacroF1\": 0.5387960204161004, \"Memory in Mb\": 25.669864654541016, \"Time in s\": 10186.398449 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5455275347400926, \"MicroF1\": 0.5455275347400926, \"MacroF1\": 0.5361266295596548, \"Memory in Mb\": 25.668834686279297, \"Time in s\": 10627.539344 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5440752755334367, \"MicroF1\": 0.5440752755334367, \"MacroF1\": 0.534604738581891, \"Memory in Mb\": 26.10423469543457, \"Time in s\": 11077.630938 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5484443742971571, \"MicroF1\": 
0.5484443742971571, \"MacroF1\": 0.538570218508335, \"Memory in Mb\": 27.224528312683105, \"Time in s\": 11536.66667 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5534081904798717, \"MicroF1\": 0.5534081904798717, \"MacroF1\": 0.5429607704191827, \"Memory in Mb\": 28.06478881835937, \"Time in s\": 12004.483564 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Insects\", \"Accuracy\": 0.5540824636830243, \"MicroF1\": 0.5540824636830243, \"MacroF1\": 0.543927330204892, \"Memory in Mb\": 28.290183067321777, \"Time in s\": 12481.284297 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9877149877149876, \"MicroF1\": 0.9877149877149876, \"MacroF1\": 0.7696139476961394, \"Memory in Mb\": 2.1705713272094727, \"Time in s\": 1.169245 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.988957055214724, \"MicroF1\": 0.988957055214724, \"MacroF1\": 0.9592655637573824, \"Memory in Mb\": 2.994051933288574, \"Time in s\": 4.333642 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9852820932134096, \"MicroF1\": 0.9852820932134096, \"MacroF1\": 0.9482751483180804, \"Memory in Mb\": 4.729727745056152, \"Time in s\": 12.346709 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9822194972409564, \"MicroF1\": 0.9822194972409564, \"MacroF1\": 0.9509896151723368, \"Memory in Mb\": 5.999781608581543, \"Time in s\": 25.091939 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9725355566454144, \"MicroF1\": 0.9725355566454144, \"MacroF1\": 0.928775026512405, \"Memory in Mb\": 7.915155410766602, \"Time in s\": 43.495481 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9550469963220268, \"MicroF1\": 0.9550469963220268, \"MacroF1\": 0.9404929408648164, \"Memory in Mb\": 9.98500156402588, \"Time in s\": 68.010219 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9516637478108582, \"MicroF1\": 0.9516637478108582, \"MacroF1\": 0.9265706247083844, \"Memory in Mb\": 13.281692504882812, \"Time in s\": 95.835033 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9457554397793442, \"MicroF1\": 0.9457554397793442, \"MacroF1\": 0.9273434636455652, \"Memory in Mb\": 16.35391616821289, \"Time in s\": 127.205902 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9417052574230454, \"MicroF1\": 0.9417052574230454, \"MacroF1\": 0.925978466853896, \"Memory in Mb\": 18.91156578063965, \"Time in s\": 163.340984 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9355234126011276, \"MicroF1\": 0.9355234126011276, \"MacroF1\": 0.9181372267911062, \"Memory in Mb\": 22.338744163513184, \"Time in s\": 205.235807 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.931580120347671, \"MicroF1\": 0.931580120347671, \"MacroF1\": 0.9327276252021246, \"Memory in Mb\": 25.31070709228516, \"Time in s\": 252.959286 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9303370786516854, \"MicroF1\": 0.9303370786516854, \"MacroF1\": 0.9257176086775136, \"Memory in Mb\": 28.274658203125, \"Time in s\": 306.688529 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.925325287573072, \"MicroF1\": 0.925325287573072, \"MacroF1\": 0.9165251784293146, \"Memory in Mb\": 32.214202880859375, \"Time in s\": 367.4329340000001 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9226054981614428, \"MicroF1\": 0.9226054981614428, \"MacroF1\": 0.9209111845314156, \"Memory in Mb\": 34.49547863006592, \"Time in s\": 436.878813 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9181238764504004, \"MicroF1\": 0.9181238764504004, \"MacroF1\": 0.9091206319047904, \"Memory in Mb\": 38.86995029449463, \"Time in s\": 513.269664 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9129768653286348, \"MicroF1\": 0.9129768653286348, \"MacroF1\": 0.9114007831703168, \"Memory in Mb\": 42.32428169250488, \"Time in s\": 602.212119 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9114635904830568, \"MicroF1\": 0.9114635904830568, \"MacroF1\": 0.9134311944430068, \"Memory in Mb\": 44.19167232513428, \"Time in s\": 700.8190040000001 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9116165055154568, \"MicroF1\": 0.9116165055154568, \"MacroF1\": 0.9097332482243848, \"Memory in Mb\": 44.84274196624756, \"Time in s\": 807.0835780000001 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9112372597084248, \"MicroF1\": 0.9112372597084248, \"MacroF1\": 0.9111242959524108, \"Memory in Mb\": 46.84857273101807, \"Time in s\": 921.356409 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9094251746537566, \"MicroF1\": 0.9094251746537566, \"MacroF1\": 0.9076128910354778, \"Memory in Mb\": 51.16739654541016, \"Time in s\": 1044.662094 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9066184195167504, \"MicroF1\": 0.9066184195167504, \"MacroF1\": 0.9066450469749988, \"Memory in Mb\": 55.86186981201172, \"Time in s\": 1177.672199 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9056267409470752, \"MicroF1\": 0.9056267409470752, \"MacroF1\": 0.906335380756654, \"Memory in Mb\": 58.41574668884277, \"Time in s\": 1322.047745 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9030160929340296, \"MicroF1\": 0.9030160929340296, \"MacroF1\": 0.9022077684947396, \"Memory in Mb\": 62.26175117492676, \"Time in s\": 1477.298762 }, { 
\"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8986824634868757, \"MicroF1\": 0.8986824634868757, \"MacroF1\": 0.8984090939041232, \"Memory in Mb\": 65.20822143554688, \"Time in s\": 1646.0177680000002 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8947936072163938, \"MicroF1\": 0.8947936072163937, \"MacroF1\": 0.8926613887647973, \"Memory in Mb\": 69.96524620056152, \"Time in s\": 1829.707382 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8881870462901857, \"MicroF1\": 0.8881870462901857, \"MacroF1\": 0.8865702773222168, \"Memory in Mb\": 73.60436820983887, \"Time in s\": 2032.787653 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8849750340444847, \"MicroF1\": 0.8849750340444847, \"MacroF1\": 0.8859866133359942, \"Memory in Mb\": 77.43610191345215, \"Time in s\": 2252.324419 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8823426420379935, \"MicroF1\": 0.8823426420379935, \"MacroF1\": 0.8811651142625456, \"Memory in Mb\": 81.9732666015625, \"Time in s\": 2486.561888 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8789620488547037, \"MicroF1\": 0.8789620488547037, \"MacroF1\": 0.8783725809837211, \"Memory in Mb\": 87.50129985809326, \"Time in s\": 2738.033946 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8803823841817142, \"MicroF1\": 0.8803823841817142, \"MacroF1\": 0.8815469015078649, \"Memory in Mb\": 88.71501064300537, \"Time in s\": 3002.626444 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.878231991776706, \"MicroF1\": 0.878231991776706, \"MacroF1\": 0.8774838611192476, \"Memory in Mb\": 93.73236656188963, \"Time in s\": 3282.43662 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8737648410570663, \"MicroF1\": 0.8737648410570663, \"MacroF1\": 0.8731746930338653, \"Memory in Mb\": 98.2464723587036, \"Time in s\": 3580.919978 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.872168164599272, \"MicroF1\": 0.872168164599272, \"MacroF1\": 0.8726091982990895, \"Memory in Mb\": 102.17141056060792, \"Time in s\": 3896.583194 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8693677456564054, \"MicroF1\": 0.8693677456564054, \"MacroF1\": 0.869586320653203, \"Memory in Mb\": 106.91088581085204, \"Time in s\": 4229.831886 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8653967364661391, \"MicroF1\": 0.8653967364661391, \"MacroF1\": 0.8650950015616714, \"Memory in Mb\": 111.61231708526611, \"Time in s\": 4581.249889 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8650507251310683, \"MicroF1\": 0.8650507251310683, 
\"MacroF1\": 0.8661026270223636, \"Memory in Mb\": 115.34651947021484, \"Time in s\": 4948.063453 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8667770785028155, \"MicroF1\": 0.8667770785028155, \"MacroF1\": 0.8680260482593213, \"Memory in Mb\": 118.24315452575684, \"Time in s\": 5329.445739 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8673805069986454, \"MicroF1\": 0.8673805069986454, \"MacroF1\": 0.8683176015232197, \"Memory in Mb\": 121.64087104797365, \"Time in s\": 5727.804435999999 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8630507196279303, \"MicroF1\": 0.8630507196279303, \"MacroF1\": 0.8630078198630903, \"Memory in Mb\": 126.92564392089844, \"Time in s\": 6152.632774 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8588761566272444, \"MicroF1\": 0.8588761566272444, \"MacroF1\": 0.8591617021179835, \"Memory in Mb\": 132.20891761779785, \"Time in s\": 6602.417036 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8546661086865547, \"MicroF1\": 0.8546661086865547, \"MacroF1\": 0.8551483908890608, \"Memory in Mb\": 137.6782627105713, \"Time in s\": 7072.757626 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.850539830755763, \"MicroF1\": 0.850539830755763, \"MacroF1\": 0.8506967692771638, \"Memory in Mb\": 139.40350437164307, \"Time in s\": 7574.584865 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8456934389785099, \"MicroF1\": 0.8456934389785099, \"MacroF1\": 0.845900693576748, \"Memory in Mb\": 144.32332038879397, \"Time in s\": 8103.947966 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8442983677789538, \"MicroF1\": 0.8442983677789538, \"MacroF1\": 0.8449597117669952, \"Memory in Mb\": 149.55280876159668, \"Time in s\": 8653.396068 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8462879241788769, \"MicroF1\": 0.8462879241788769, \"MacroF1\": 0.8472097835611015, \"Memory in Mb\": 154.20918083190918, \"Time in s\": 9220.958091 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8472851281504769, \"MicroF1\": 0.8472851281504769, \"MacroF1\": 0.8482490326871865, \"Memory in Mb\": 158.50969696044922, \"Time in s\": 9805.946569 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.845632333767927, \"MicroF1\": 0.845632333767927, \"MacroF1\": 0.8464906194356719, \"Memory in Mb\": 163.69990730285645, \"Time in s\": 10414.74471 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8470612265740693, \"MicroF1\": 0.8470612265740693, \"MacroF1\": 0.8481337525939465, \"Memory in Mb\": 167.61861419677734, \"Time in s\": 11041.283472 }, { \"step\": 19992, \"track\": \"Multiclass classification\", 
\"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8455805112300535, \"MicroF1\": 0.8455805112300535, \"MacroF1\": 0.8467102165017473, \"Memory in Mb\": 172.522611618042, \"Time in s\": 11691.217597 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"AdaBoost\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8424922790332859, \"MicroF1\": 0.8424922790332859, \"MacroF1\": 0.8436347186891262, \"Memory in Mb\": 177.38492488861084, \"Time in s\": 12366.859336 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3111111111111111, \"MicroF1\": 0.3111111111111111, \"MacroF1\": 0.2457649726557289, \"Memory in Mb\": 4.181334495544434, \"Time in s\": 1.091398 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4835164835164835, \"MicroF1\": 0.4835164835164835, \"MacroF1\": 0.4934752395581889, \"Memory in Mb\": 4.184550285339356, \"Time in s\": 2.768304 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5328467153284672, \"MicroF1\": 0.5328467153284672, \"MacroF1\": 0.5528821792646677, \"Memory in Mb\": 4.184275627136231, \"Time in s\": 4.818241 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5956284153005464, \"MicroF1\": 0.5956284153005464, \"MacroF1\": 0.614143164890895, \"Memory in Mb\": 4.184859275817871, \"Time in s\": 7.25269 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.62882096069869, \"MicroF1\": 0.62882096069869, \"MacroF1\": 0.6441389332893815, \"Memory in Mb\": 4.184233665466309, \"Time in s\": 10.061851 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.64, \"MicroF1\": 0.64, \"MacroF1\": 0.6559607038460422, \"Memory in Mb\": 4.184771537780762, \"Time in s\": 13.254704 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6666666666666666, \"MicroF1\": 0.6666666666666666, \"MacroF1\": 0.6673617488913626, \"Memory in Mb\": 4.184481620788574, \"Time in s\": 16.809438 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6948228882833788, \"MicroF1\": 0.6948228882833788, \"MacroF1\": 0.6911959597548877, \"Memory in Mb\": 4.184699058532715, \"Time in s\": 20.719577 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.711864406779661, \"MicroF1\": 0.711864406779661, \"MacroF1\": 0.7079630503641953, \"Memory in Mb\": 4.185038566589356, \"Time in s\": 25.02075 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7124183006535948, \"MicroF1\": 0.7124183006535948, \"MacroF1\": 0.7065500352371009, \"Memory in Mb\": 4.184954643249512, \"Time in s\": 29.692393 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7207920792079208, \"MicroF1\": 0.7207920792079208, \"MacroF1\": 0.7127593158348896, \"Memory in Mb\": 4.184813499450684, \"Time in s\": 34.691523 }, { \"step\": 
552, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7259528130671506, \"MicroF1\": 0.7259528130671506, \"MacroF1\": 0.7192025503807162, \"Memory in Mb\": 4.184779167175293, \"Time in s\": 40.034435 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7319932998324958, \"MicroF1\": 0.7319932998324957, \"MacroF1\": 0.7251188986558661, \"Memory in Mb\": 4.185019493103027, \"Time in s\": 45.725681 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7309486780715396, \"MicroF1\": 0.7309486780715396, \"MacroF1\": 0.7259740406437202, \"Memory in Mb\": 4.184813499450684, \"Time in s\": 51.77256499999999 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7358490566037735, \"MicroF1\": 0.7358490566037735, \"MacroF1\": 0.7304359912942561, \"Memory in Mb\": 4.184943199157715, \"Time in s\": 58.14505499999999 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7374149659863946, \"MicroF1\": 0.7374149659863947, \"MacroF1\": 0.7331499347170709, \"Memory in Mb\": 4.185004234313965, \"Time in s\": 64.846683 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7426376440460948, \"MicroF1\": 0.7426376440460948, \"MacroF1\": 0.7385597120510639, \"Memory in Mb\": 4.184893608093262, \"Time in s\": 71.895225 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7436517533252721, \"MicroF1\": 0.7436517533252721, \"MacroF1\": 0.7412375783772317, \"Memory in Mb\": 4.184882164001465, \"Time in s\": 79.27328899999999 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7491408934707904, \"MicroF1\": 0.7491408934707904, \"MacroF1\": 0.7454343548790068, \"Memory in Mb\": 4.185431480407715, \"Time in s\": 86.96748099999999 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7486398258977149, \"MicroF1\": 0.7486398258977149, \"MacroF1\": 0.7441307384051415, \"Memory in Mb\": 4.185576438903809, \"Time in s\": 94.98591 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7492227979274612, \"MicroF1\": 0.749222797927461, \"MacroF1\": 0.7439306216964365, \"Memory in Mb\": 4.185370445251465, \"Time in s\": 103.316786 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7487636003956478, \"MicroF1\": 0.7487636003956478, \"MacroF1\": 0.7437900284473965, \"Memory in Mb\": 4.185484886169434, \"Time in s\": 111.977749 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750236518448439, \"MicroF1\": 0.7502365184484389, \"MacroF1\": 0.7448138061687654, \"Memory in Mb\": 4.185519218444824, \"Time in s\": 120.953125 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7524932003626473, \"MicroF1\": 
0.7524932003626473, \"MacroF1\": 0.7468314646869902, \"Memory in Mb\": 4.185484886169434, \"Time in s\": 130.25113499999998 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7554395126196692, \"MicroF1\": 0.7554395126196692, \"MacroF1\": 0.7493227137357602, \"Memory in Mb\": 4.185664176940918, \"Time in s\": 139.83544499999996 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7581589958158996, \"MicroF1\": 0.7581589958158996, \"MacroF1\": 0.7527652773681007, \"Memory in Mb\": 4.185568809509277, \"Time in s\": 149.73636899999997 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7574536663980661, \"MicroF1\": 0.7574536663980661, \"MacroF1\": 0.7525915384194215, \"Memory in Mb\": 4.185683250427246, \"Time in s\": 159.94850499999995 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7622377622377622, \"MicroF1\": 0.7622377622377621, \"MacroF1\": 0.7563448085202399, \"Memory in Mb\": 4.185866355895996, \"Time in s\": 170.48539899999994 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7621905476369092, \"MicroF1\": 0.7621905476369092, \"MacroF1\": 0.7566636999776912, \"Memory in Mb\": 4.186026573181152, \"Time in s\": 181.34153899999995 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7635968092820885, \"MicroF1\": 0.7635968092820886, \"MacroF1\": 0.7587252257765656, \"Memory in Mb\": 4.1860761642456055, \"Time in s\": 192.51808799999995 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7663157894736842, \"MicroF1\": 0.7663157894736842, \"MacroF1\": 0.7609139797315135, \"Memory in Mb\": 4.186099052429199, \"Time in s\": 204.01178799999997 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7709041468388851, \"MicroF1\": 0.7709041468388851, \"MacroF1\": 0.763768994920769, \"Memory in Mb\": 4.186240196228027, \"Time in s\": 215.81223399999996 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7719182597231378, \"MicroF1\": 0.7719182597231378, \"MacroF1\": 0.7639714255563932, \"Memory in Mb\": 4.186617851257324, \"Time in s\": 227.92141499999997 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7722328854766475, \"MicroF1\": 0.7722328854766475, \"MacroF1\": 0.765072133508071, \"Memory in Mb\": 4.186800956726074, \"Time in s\": 240.33772499999995 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7725295214418894, \"MicroF1\": 0.7725295214418892, \"MacroF1\": 0.764505787280341, \"Memory in Mb\": 4.186892509460449, \"Time in s\": 253.082857 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7716012084592145, \"MicroF1\": 0.7716012084592145, \"MacroF1\": 0.7634170612719107, \"Memory in Mb\": 
4.1867780685424805, \"Time in s\": 266.155857 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7713109935332157, \"MicroF1\": 0.7713109935332157, \"MacroF1\": 0.7652815676598499, \"Memory in Mb\": 4.187075614929199, \"Time in s\": 279.522325 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.77389811104751, \"MicroF1\": 0.77389811104751, \"MacroF1\": 0.7674409436090757, \"Memory in Mb\": 4.187258720397949, \"Time in s\": 293.187056 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752370329057445, \"MicroF1\": 0.7752370329057446, \"MacroF1\": 0.7674318582149376, \"Memory in Mb\": 4.1872968673706055, \"Time in s\": 307.180551 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7765089722675367, \"MicroF1\": 0.7765089722675368, \"MacroF1\": 0.7688731808749575, \"Memory in Mb\": 4.187228202819824, \"Time in s\": 321.497423 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7750663129973475, \"MicroF1\": 0.7750663129973475, \"MacroF1\": 0.7678921362145585, \"Memory in Mb\": 4.187155723571777, \"Time in s\": 336.11504499999995 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7752459865354738, \"MicroF1\": 0.7752459865354739, \"MacroF1\": 0.7671636716269125, \"Memory in Mb\": 4.187251091003418, \"Time in s\": 351.053915 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7759231158320687, \"MicroF1\": 0.7759231158320687, \"MacroF1\": 0.7670573130332382, \"Memory in Mb\": 4.187151908874512, \"Time in s\": 366.316945 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7775580820563519, \"MicroF1\": 0.7775580820563519, \"MacroF1\": 0.7671264358471986, \"Memory in Mb\": 4.187129020690918, \"Time in s\": 381.896141 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.77670372160464, \"MicroF1\": 0.7767037216046399, \"MacroF1\": 0.7665050383810529, \"Memory in Mb\": 4.187205314636231, \"Time in s\": 397.783945 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7773049645390071, \"MicroF1\": 0.7773049645390071, \"MacroF1\": 0.7663404166149341, \"Memory in Mb\": 4.187205314636231, \"Time in s\": 413.995271 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7783433595557612, \"MicroF1\": 0.7783433595557612, \"MacroF1\": 0.7669657147488861, \"Memory in Mb\": 4.187277793884277, \"Time in s\": 430.524022 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.780244676030811, \"MicroF1\": 0.780244676030811, \"MacroF1\": 0.7678552364681829, \"Memory in Mb\": 4.187273979187012, \"Time in s\": 447.387318 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", 
\"Accuracy\": 0.7776298268974701, \"MicroF1\": 0.7776298268974701, \"MacroF1\": 0.7652407320979201, \"Memory in Mb\": 4.187224388122559, \"Time in s\": 464.558569 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7768595041322314, \"MicroF1\": 0.7768595041322314, \"MacroF1\": 0.7644610611003249, \"Memory in Mb\": 4.18729305267334, \"Time in s\": 482.036228 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6360189573459716, \"MicroF1\": 0.6360189573459716, \"MacroF1\": 0.5970323052762561, \"Memory in Mb\": 6.583989143371582, \"Time in s\": 11.628861 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5890580890213498, \"Memory in Mb\": 6.584485054016113, \"Time in s\": 33.423105 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6157246605620461, \"MicroF1\": 0.6157246605620461, \"MacroF1\": 0.5802533923244892, \"Memory in Mb\": 6.58519458770752, \"Time in s\": 65.495981 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6107032914989344, \"MicroF1\": 0.6107032914989344, \"MacroF1\": 0.574850135712032, \"Memory in Mb\": 6.585576057434082, \"Time in s\": 107.816588 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.615078613373745, \"MicroF1\": 0.615078613373745, \"MacroF1\": 0.5779184071248228, \"Memory in Mb\": 6.5863847732543945, \"Time in s\": 160.258759 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6091554853985793, \"MicroF1\": 0.6091554853985793, \"MacroF1\": 0.5734262289926554, \"Memory in Mb\": 6.586209297180176, \"Time in s\": 222.866435 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6058720064943851, \"MicroF1\": 0.6058720064943851, \"MacroF1\": 0.5704339658550047, \"Memory in Mb\": 6.585629463195801, \"Time in s\": 295.76759400000003 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6070794364863265, \"MicroF1\": 0.6070794364863265, \"MacroF1\": 0.5712261057542335, \"Memory in Mb\": 6.585507392883301, \"Time in s\": 378.90789100000006 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6040197832263495, \"MicroF1\": 0.6040197832263495, \"MacroF1\": 0.567883906637128, \"Memory in Mb\": 6.585629463195801, \"Time in s\": 472.31115100000005 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6035609432711431, \"MicroF1\": 0.6035609432711431, \"MacroF1\": 0.5674913890030829, \"Memory in Mb\": 6.5859880447387695, \"Time in s\": 575.935454 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6006026689625484, \"MicroF1\": 0.6006026689625484, \"MacroF1\": 0.5651886352361905, \"Memory in Mb\": 6.585965156555176, \"Time in s\": 689.792083 }, { \"step\": 12672, \"track\": \"Multiclass 
classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6032673032909794, \"MicroF1\": 0.6032673032909794, \"MacroF1\": 0.5704386423232538, \"Memory in Mb\": 6.585919380187988, \"Time in s\": 813.953016 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6147738034530488, \"MicroF1\": 0.6147738034530488, \"MacroF1\": 0.5955647708468143, \"Memory in Mb\": 6.584591865539551, \"Time in s\": 948.29318 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6052222147060813, \"MicroF1\": 0.6052222147060813, \"MacroF1\": 0.586323857604342, \"Memory in Mb\": 6.583569526672363, \"Time in s\": 1092.793426 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.570427425973862, \"MicroF1\": 0.570427425973862, \"MacroF1\": 0.5530515395071289, \"Memory in Mb\": 6.584805488586426, \"Time in s\": 1247.37522 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5441254809115122, \"MicroF1\": 0.5441254809115122, \"MacroF1\": 0.5274626123277456, \"Memory in Mb\": 6.5833024978637695, \"Time in s\": 1412.138663 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5247061445044844, \"MicroF1\": 0.5247061445044844, \"MacroF1\": 0.5077849244821269, \"Memory in Mb\": 6.58421802520752, \"Time in s\": 1587.005582 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5143368232756353, \"MicroF1\": 0.5143368232756353, \"MacroF1\": 0.4945891921842289, \"Memory in Mb\": 5.490016937255859, \"Time in s\": 1771.530278 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5203110202860988, \"MicroF1\": 0.5203110202860988, \"MacroF1\": 0.4996705647403201, \"Memory in Mb\": 13.561949729919434, \"Time in s\": 1966.283082 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5285288129172783, \"MicroF1\": 0.5285288129172783, \"MacroF1\": 0.5082662721949724, \"Memory in Mb\": 14.33274745941162, \"Time in s\": 2175.200087 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5345208568207441, \"MicroF1\": 0.5345208568207441, \"MacroF1\": 0.5149076376433322, \"Memory in Mb\": 14.875703811645508, \"Time in s\": 2397.3423569999995 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5431104989023288, \"MicroF1\": 0.5431104989023288, \"MacroF1\": 0.5234265380967914, \"Memory in Mb\": 14.787662506103516, \"Time in s\": 2632.304625 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.550253221888253, \"MicroF1\": 0.550253221888253, \"MacroF1\": 0.5298759738824472, \"Memory in Mb\": 16.32009983062744, \"Time in s\": 2880.082024 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5564455668231859, \"MicroF1\": 0.5564455668231859, \"MacroF1\": 0.5355827199778521, \"Memory in Mb\": 16.310463905334473, \"Time in 
s\": 3141.997661 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5614985416114247, \"MicroF1\": 0.5614985416114247, \"MacroF1\": 0.5398687013453174, \"Memory in Mb\": 16.303704261779785, \"Time in s\": 3417.905768 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5647787288289929, \"MicroF1\": 0.5647787288289929, \"MacroF1\": 0.5421799635248432, \"Memory in Mb\": 15.336288452148438, \"Time in s\": 3708.118695 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5680965241485743, \"MicroF1\": 0.5680965241485743, \"MacroF1\": 0.5473162851674372, \"Memory in Mb\": 13.81827449798584, \"Time in s\": 4011.685358 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5701626813677411, \"MicroF1\": 0.5701626813677411, \"MacroF1\": 0.5529817475842932, \"Memory in Mb\": 11.429868698120115, \"Time in s\": 4328.316425 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5724455474643242, \"MicroF1\": 0.5724455474643242, \"MacroF1\": 0.5586057023406553, \"Memory in Mb\": 9.4625883102417, \"Time in s\": 4656.70214 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5750181508254679, \"MicroF1\": 0.5750181508254679, \"MacroF1\": 0.5636300266647484, \"Memory in Mb\": 9.46137523651123, \"Time in s\": 4996.240557 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5782190316175347, \"MicroF1\": 0.5782190316175347, \"MacroF1\": 0.5684825891024486, \"Memory in Mb\": 9.4603910446167, \"Time in s\": 5346.796886 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5756858335059631, \"MicroF1\": 0.5756858335059631, \"MacroF1\": 0.5663669622675245, \"Memory in Mb\": 7.919375419616699, \"Time in s\": 5710.001071 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5754871294516027, \"MicroF1\": 0.5754871294516027, \"MacroF1\": 0.5660193869557423, \"Memory in Mb\": 7.260138511657715, \"Time in s\": 6084.365793 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5764142272233518, \"MicroF1\": 0.5764142272233518, \"MacroF1\": 0.5665362650344427, \"Memory in Mb\": 6.60003662109375, \"Time in s\": 6469.590309 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.575908439081144, \"MicroF1\": 0.575908439081144, \"MacroF1\": 0.5657420280651625, \"Memory in Mb\": 6.597938537597656, \"Time in s\": 6865.284174 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5767723267131396, \"MicroF1\": 0.5767723267131396, \"MacroF1\": 0.5661330182942309, \"Memory in Mb\": 6.597076416015625, \"Time in s\": 7271.517818 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5764889560031737, \"MicroF1\": 0.5764889560031737, \"MacroF1\": 
0.5659501482422926, \"Memory in Mb\": 6.593917846679688, \"Time in s\": 7688.178552 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5734792035287961, \"MicroF1\": 0.5734792035287961, \"MacroF1\": 0.5636824355748769, \"Memory in Mb\": 8.576746940612793, \"Time in s\": 8115.281794 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5726634776485443, \"MicroF1\": 0.5726634776485443, \"MacroF1\": 0.563417094879665, \"Memory in Mb\": 8.779536247253418, \"Time in s\": 8552.341759 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5723383602831507, \"MicroF1\": 0.5723383602831507, \"MacroF1\": 0.5635995837049609, \"Memory in Mb\": 10.21227741241455, \"Time in s\": 8998.890652 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5718212264695692, \"MicroF1\": 0.5718212264695692, \"MacroF1\": 0.5636175088230181, \"Memory in Mb\": 10.21111011505127, \"Time in s\": 9455.260723 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.571125791977633, \"MicroF1\": 0.571125791977633, \"MacroF1\": 0.5633830644644046, \"Memory in Mb\": 10.209759712219238, \"Time in s\": 9921.572486 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5712555332878191, \"MicroF1\": 0.5712555332878191, \"MacroF1\": 0.5638292127585011, \"Memory in Mb\": 11.4169340133667, \"Time in s\": 10398.038321 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5728429072595399, \"MicroF1\": 0.5728429072595399, \"MacroF1\": 0.565898409914518, \"Memory in Mb\": 12.449102401733398, \"Time in s\": 10884.980897 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5768850354595004, \"MicroF1\": 0.5768850354595004, \"MacroF1\": 0.57039744062367, \"Memory in Mb\": 16.244935989379883, \"Time in s\": 11383.832017 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5828718476582604, \"MicroF1\": 0.5828718476582604, \"MacroF1\": 0.5764217258661826, \"Memory in Mb\": 15.377230644226074, \"Time in s\": 11896.186788 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.5890471681005823, \"MicroF1\": 0.5890471681005823, \"MacroF1\": 0.5823842044431963, \"Memory in Mb\": 14.950640678405762, \"Time in s\": 12421.939031 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.594728431353207, \"MicroF1\": 0.594728431353207, \"MacroF1\": 0.5876258810149467, \"Memory in Mb\": 12.564711570739746, \"Time in s\": 12959.806662 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6007382641130201, \"MicroF1\": 0.6007382641130201, \"MacroF1\": 0.5930524375976366, \"Memory in Mb\": 11.987659454345703, \"Time in s\": 13508.895544 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 
0.606053144945927, \"MicroF1\": 0.606053144945927, \"MacroF1\": 0.5982224401760299, \"Memory in Mb\": 3.750063896179199, \"Time in s\": 14067.159334999998 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.2869701385498047, \"Time in s\": 1.329477 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411042944785276, \"MicroF1\": 0.9411042944785276, \"MacroF1\": 0.7377235942917068, \"Memory in Mb\": 3.233952522277832, \"Time in s\": 4.406974 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8879803761242846, \"MicroF1\": 0.8879803761242846, \"MacroF1\": 0.873420796574987, \"Memory in Mb\": 4.178282737731934, \"Time in s\": 9.866805 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8988350705088902, \"MicroF1\": 0.8988350705088902, \"MacroF1\": 0.8792834531664682, \"Memory in Mb\": 5.13068962097168, \"Time in s\": 18.093903 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8950465914664051, \"MicroF1\": 0.8950465914664051, \"MacroF1\": 0.8828407845486113, \"Memory in Mb\": 6.191357612609863, \"Time in s\": 29.527087 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8561503882304863, \"MicroF1\": 0.8561503882304863, \"MacroF1\": 0.8521381720173345, \"Memory in Mb\": 7.13577938079834, \"Time in s\": 44.765436 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8623467600700525, \"MicroF1\": 0.8623467600700525, \"MacroF1\": 0.8461129037988256, \"Memory in Mb\": 8.082098007202148, \"Time in s\": 64.393699 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8528961078761875, \"MicroF1\": 0.8528961078761875, \"MacroF1\": 0.828204357625989, \"Memory in Mb\": 9.02734088897705, \"Time in s\": 89.05171 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8428221193135386, \"MicroF1\": 0.8428221193135386, \"MacroF1\": 0.8381978174360706, \"Memory in Mb\": 9.972960472106934, \"Time in s\": 119.28991 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8350085805344447, \"MicroF1\": 0.8350085805344447, \"MacroF1\": 0.8208915725311207, \"Memory in Mb\": 11.171079635620115, \"Time in s\": 155.799996 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.822821484287943, \"MicroF1\": 0.822821484287943, \"MacroF1\": 0.832874806475175, \"Memory in Mb\": 12.116948127746582, \"Time in s\": 198.957143 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8212461695607763, \"MicroF1\": 0.8212461695607763, \"MacroF1\": 0.8275900848879882, \"Memory in Mb\": 13.061814308166504, \"Time in s\": 249.493693 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": 
\"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8178389590797661, \"MicroF1\": 0.8178389590797661, \"MacroF1\": 0.8022229037941512, \"Memory in Mb\": 14.007365226745604, \"Time in s\": 307.913741 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7974085098931886, \"MicroF1\": 0.7974085098931886, \"MacroF1\": 0.8005324816804641, \"Memory in Mb\": 14.95396614074707, \"Time in s\": 374.813654 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7947377022389279, \"MicroF1\": 0.7947377022389279, \"MacroF1\": 0.7763699164747573, \"Memory in Mb\": 15.899590492248535, \"Time in s\": 450.788506 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7695725448138502, \"MicroF1\": 0.7695725448138502, \"MacroF1\": 0.7646092489325799, \"Memory in Mb\": 16.84642791748047, \"Time in s\": 536.475639 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7614996395097332, \"MicroF1\": 0.7614996395097332, \"MacroF1\": 0.7633186803137438, \"Memory in Mb\": 17.791536331176758, \"Time in s\": 632.653421 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.770393572109492, \"MicroF1\": 0.770393572109492, \"MacroF1\": 0.7679684376178252, \"Memory in Mb\": 18.753963470458984, \"Time in s\": 739.829913 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7709972906721714, \"MicroF1\": 0.7709972906721715, \"MacroF1\": 0.7694364393340193, \"Memory in Mb\": 19.70015239715576, \"Time in s\": 858.3068800000001 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7739919107733791, \"MicroF1\": 0.7739919107733791, \"MacroF1\": 0.7702264725589797, \"Memory in Mb\": 20.646059036254883, \"Time in s\": 989.0801780000002 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.770631492938018, \"MicroF1\": 0.770631492938018, \"MacroF1\": 0.7706502591714904, \"Memory in Mb\": 22.072673797607425, \"Time in s\": 1132.67323 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7691364902506964, \"MicroF1\": 0.7691364902506964, \"MacroF1\": 0.7697475673922982, \"Memory in Mb\": 23.017619132995605, \"Time in s\": 1289.930532 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7679846530960247, \"MicroF1\": 0.7679846530960248, \"MacroF1\": 0.7675735514139922, \"Memory in Mb\": 23.96499538421631, \"Time in s\": 1461.233929 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7634562353181493, \"MicroF1\": 0.7634562353181493, \"MacroF1\": 0.7626887405791724, \"Memory in Mb\": 24.91041660308838, \"Time in s\": 1647.051427 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7552701245220119, \"MicroF1\": 0.7552701245220118, \"MacroF1\": 0.7474447650479976, \"Memory in Mb\": 25.855456352233887, \"Time in s\": 
1848.111209 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.734326388234185, \"MicroF1\": 0.734326388234185, \"MacroF1\": 0.7218544335091276, \"Memory in Mb\": 26.80277729034424, \"Time in s\": 2064.993893 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.727099409895597, \"MicroF1\": 0.727099409895597, \"MacroF1\": 0.7232704418570853, \"Memory in Mb\": 27.74752426147461, \"Time in s\": 2298.467217 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7203011468090694, \"MicroF1\": 0.7203011468090693, \"MacroF1\": 0.7069709690618045, \"Memory in Mb\": 28.693537712097168, \"Time in s\": 2549.106467 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7107598681430141, \"MicroF1\": 0.7107598681430141, \"MacroF1\": 0.7032019097144009, \"Memory in Mb\": 29.638681411743164, \"Time in s\": 2817.5197820000003 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7152545142577008, \"MicroF1\": 0.7152545142577007, \"MacroF1\": 0.7117335483783439, \"Memory in Mb\": 30.584209442138672, \"Time in s\": 3104.4910310000005 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7121056377006405, \"MicroF1\": 0.7121056377006404, \"MacroF1\": 0.7043178518121461, \"Memory in Mb\": 31.53076934814453, \"Time in s\": 3410.5360360000004 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7005744925315971, \"MicroF1\": 0.7005744925315971, \"MacroF1\": 0.6932522175542292, \"Memory in Mb\": 32.476640701293945, \"Time in s\": 3735.542008 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6985070192379114, \"MicroF1\": 0.6985070192379114, \"MacroF1\": 0.6945196760058037, \"Memory in Mb\": 33.421990394592285, \"Time in s\": 4080.277425 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6980751207555331, \"MicroF1\": 0.6980751207555331, \"MacroF1\": 0.6949558493849793, \"Memory in Mb\": 34.36870098114014, \"Time in s\": 4445.365121 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6936760277330345, \"MicroF1\": 0.6936760277330345, \"MacroF1\": 0.6891645690411646, \"Memory in Mb\": 35.313669204711914, \"Time in s\": 4831.313826 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6963300878327773, \"MicroF1\": 0.6963300878327773, \"MacroF1\": 0.6946500105809528, \"Memory in Mb\": 36.259453773498535, \"Time in s\": 5238.529619 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7024180192116595, \"MicroF1\": 0.7024180192116595, \"MacroF1\": 0.7008836593188431, \"Memory in Mb\": 37.20665740966797, \"Time in s\": 5667.803474 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.702509191769335, 
\"MicroF1\": 0.702509191769335, \"MacroF1\": 0.6995855030221436, \"Memory in Mb\": 38.15153884887695, \"Time in s\": 6119.5687720000005 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6934824963861479, \"MicroF1\": 0.6934824963861479, \"MacroF1\": 0.687175788748239, \"Memory in Mb\": 39.09754180908203, \"Time in s\": 6594.573754000001 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6848458851645322, \"MicroF1\": 0.6848458851645322, \"MacroF1\": 0.6802460349069701, \"Memory in Mb\": 40.04375648498535, \"Time in s\": 7093.395211000001 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6819513361630896, \"MicroF1\": 0.6819513361630896, \"MacroF1\": 0.6795788912922722, \"Memory in Mb\": 40.98903942108154, \"Time in s\": 7616.277784000001 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6779107090749927, \"MicroF1\": 0.6779107090749927, \"MacroF1\": 0.6747648209169417, \"Memory in Mb\": 42.91780757904053, \"Time in s\": 8164.158576000001 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6705808584620646, \"MicroF1\": 0.6705808584620646, \"MacroF1\": 0.6680341530684186, \"Memory in Mb\": 43.864423751831055, \"Time in s\": 8737.125467000002 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6695448721519692, \"MicroF1\": 0.6695448721519692, \"MacroF1\": 0.6687363294804706, \"Memory in Mb\": 44.8110933303833, \"Time in s\": 9335.759485000002 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.674927828313089, \"MicroF1\": 0.674927828313089, \"MacroF1\": 0.6747300618557481, \"Memory in Mb\": 45.75679397583008, \"Time in s\": 9961.084735000002 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6799701603879149, \"MicroF1\": 0.6799701603879149, \"MacroF1\": 0.6801519832282531, \"Memory in Mb\": 46.703369140625, \"Time in s\": 10614.122071000002 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6720730117340287, \"MicroF1\": 0.6720730117340287, \"MacroF1\": 0.6711666831354974, \"Memory in Mb\": 47.648451805114746, \"Time in s\": 11294.958186000002 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6760455497114845, \"MicroF1\": 0.6760455497114845, \"MacroF1\": 0.6762772840246767, \"Memory in Mb\": 48.59414768218994, \"Time in s\": 12004.137364000002 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6715521984893202, \"MicroF1\": 0.6715521984893202, \"MacroF1\": 0.6718362805013157, \"Memory in Mb\": 49.5405502319336, \"Time in s\": 12741.947206000004 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.6679739202902103, \"MicroF1\": 0.6679739202902103, \"MacroF1\": 0.6688529665037395, \"Memory in Mb\": 50.48721218109131, \"Time in s\": 
13509.112779000005 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.3777777777777777, \"MicroF1\": 0.3777777777777777, \"MacroF1\": 0.2811210847975554, \"Memory in Mb\": 4.12965202331543, \"Time in s\": 2.162623 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5164835164835165, \"MicroF1\": 0.5164835164835165, \"MacroF1\": 0.5316649744849407, \"Memory in Mb\": 4.130231857299805, \"Time in s\": 5.385739 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5547445255474452, \"MicroF1\": 0.5547445255474452, \"MacroF1\": 0.5804654781117262, \"Memory in Mb\": 4.130353927612305, \"Time in s\": 9.498091 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6394923756219437, \"Memory in Mb\": 4.130964279174805, \"Time in s\": 14.44781 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6506550218340611, \"MicroF1\": 0.6506550218340611, \"MacroF1\": 0.66859135700569, \"Memory in Mb\": 4.130964279174805, \"Time in s\": 20.213143 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6618181818181819, \"MicroF1\": 0.6618181818181819, \"MacroF1\": 0.6795855359270878, \"Memory in Mb\": 4.131082534790039, \"Time in s\": 26.807818 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6853582554517134, \"MicroF1\": 0.6853582554517134, \"MacroF1\": 0.6872635633687633, \"Memory in Mb\": 4.131624221801758, \"Time in s\": 34.210100999999995 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7111716621253406, \"MicroF1\": 0.7111716621253404, \"MacroF1\": 0.7098417316927395, \"Memory in Mb\": 4.131597518920898, \"Time in s\": 42.42663699999999 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7215496368038741, \"MicroF1\": 0.7215496368038742, \"MacroF1\": 0.7201557312728714, \"Memory in Mb\": 4.13151741027832, \"Time in s\": 51.47481599999999 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7211328976034859, \"MicroF1\": 0.721132897603486, \"MacroF1\": 0.7175330036146421, \"Memory in Mb\": 4.131570816040039, \"Time in s\": 61.34488599999999 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7287128712871287, \"MicroF1\": 0.7287128712871287, \"MacroF1\": 0.7233455022590812, \"Memory in Mb\": 4.131570816040039, \"Time in s\": 72.04824799999999 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7295825771324864, \"MicroF1\": 0.7295825771324864, \"MacroF1\": 0.7255599965917697, \"Memory in Mb\": 4.131490707397461, \"Time in s\": 83.59783199999998 }, { 
\"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7353433835845896, \"MicroF1\": 0.7353433835845896, \"MacroF1\": 0.7308494254186014, \"Memory in Mb\": 4.131513595581055, \"Time in s\": 95.96874199999998 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7340590979782271, \"MicroF1\": 0.7340590979782271, \"MacroF1\": 0.7314183982762247, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 109.15469699999996 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.737300435413643, \"MicroF1\": 0.737300435413643, \"MacroF1\": 0.7343909641298695, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 123.16042199999995 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7387755102040816, \"MicroF1\": 0.7387755102040816, \"MacroF1\": 0.7369557659594496, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 138.00293199999996 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7439180537772087, \"MicroF1\": 0.7439180537772088, \"MacroF1\": 0.7419020281650245, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 153.67249999999996 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7436517533252721, \"MicroF1\": 0.7436517533252721, \"MacroF1\": 0.7432199627682998, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 170.15907299999995 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7502863688430699, \"MicroF1\": 0.7502863688430699, \"MacroF1\": 0.7482089866208982, \"Memory in Mb\": 4.132101058959961, \"Time in s\": 187.48903599999997 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.750816104461371, \"MicroF1\": 0.750816104461371, \"MacroF1\": 0.7477650187313973, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 205.64141899999996 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7512953367875648, \"MicroF1\": 0.7512953367875648, \"MacroF1\": 0.747322646811651, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 224.60247399999992 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7507418397626113, \"MicroF1\": 0.7507418397626113, \"MacroF1\": 0.7469783619055548, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 244.38522099999992 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7530747398297067, \"MicroF1\": 0.7530747398297066, \"MacroF1\": 0.7482363934596314, \"Memory in Mb\": 4.132074356079102, \"Time in s\": 264.9944789999999 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7552130553037172, \"MicroF1\": 0.7552130553037172, \"MacroF1\": 0.750118495060715, \"Memory in Mb\": 
4.132123947143555, \"Time in s\": 286.4318919999999 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7571801566579635, \"MicroF1\": 0.7571801566579635, \"MacroF1\": 0.7516199800653578, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 308.6933539999999 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7598326359832636, \"MicroF1\": 0.7598326359832636, \"MacroF1\": 0.7548841797367704, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 331.8138849999999 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7598710717163578, \"MicroF1\": 0.7598710717163577, \"MacroF1\": 0.7553301531902636, \"Memory in Mb\": 4.132123947143555, \"Time in s\": 355.7543559999999 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7645687645687645, \"MicroF1\": 0.7645687645687647, \"MacroF1\": 0.7590078532621816, \"Memory in Mb\": 4.132734298706055, \"Time in s\": 380.5041089999999 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7644411102775694, \"MicroF1\": 0.7644411102775694, \"MacroF1\": 0.7591993978414527, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 406.0948849999999 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7650471356055112, \"MicroF1\": 0.7650471356055112, \"MacroF1\": 0.7601575050520946, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 432.504415 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7670175438596492, \"MicroF1\": 0.7670175438596492, \"MacroF1\": 0.7613339877221927, \"Memory in Mb\": 4.132757186889648, \"Time in s\": 459.75425999999993 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7715839564921821, \"MicroF1\": 0.7715839564921821, \"MacroF1\": 0.76413964752182, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 487.813313 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7732366512854317, \"MicroF1\": 0.7732366512854317, \"MacroF1\": 0.7648275341801108, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 516.6922259999999 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7735124760076776, \"MicroF1\": 0.7735124760076776, \"MacroF1\": 0.7657569341108763, \"Memory in Mb\": 4.132802963256836, \"Time in s\": 546.3975239999999 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7737725295214419, \"MicroF1\": 0.7737725295214419, \"MacroF1\": 0.7651494083475014, \"Memory in Mb\": 4.13282585144043, \"Time in s\": 576.9286479999998 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7740181268882175, \"MicroF1\": 0.7740181268882175, \"MacroF1\": 
0.7654813489818475, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 608.2971799999998 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7730746619635509, \"MicroF1\": 0.7730746619635509, \"MacroF1\": 0.7664930279619061, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 640.4789209999998 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7756153405838581, \"MicroF1\": 0.7756153405838581, \"MacroF1\": 0.7686072256536652, \"Memory in Mb\": 4.132780075073242, \"Time in s\": 673.4948829999998 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7769102063580591, \"MicroF1\": 0.7769102063580591, \"MacroF1\": 0.7685414235990153, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 707.3642379999999 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7781402936378466, \"MicroF1\": 0.7781402936378466, \"MacroF1\": 0.7699957723931324, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 742.0792459999999 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7761273209549071, \"MicroF1\": 0.7761273209549071, \"MacroF1\": 0.7684985598909853, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 777.6304799999999 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7762817193164163, \"MicroF1\": 0.7762817193164163, \"MacroF1\": 0.767743441804642, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 814.0157899999999 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7774405665149215, \"MicroF1\": 0.7774405665149215, \"MacroF1\": 0.7684788817649146, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 851.2372119999999 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7790410281759763, \"MicroF1\": 0.7790410281759763, \"MacroF1\": 0.7689103339153599, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 889.2963629999999 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7786370227162881, \"MicroF1\": 0.7786370227162881, \"MacroF1\": 0.7686288077529282, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 928.20257 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7791962174940898, \"MicroF1\": 0.7791962174940898, \"MacroF1\": 0.768391950800897, \"Memory in Mb\": 4.132753372192383, \"Time in s\": 967.945094 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7801943544655252, \"MicroF1\": 0.7801943544655253, \"MacroF1\": 0.768962628827985, \"Memory in Mb\": 4.132776260375977, \"Time in s\": 1008.503462 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7820570910738559, \"MicroF1\": 
0.7820570910738559, \"MacroF1\": 0.7698068761587117, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1049.8865549999998 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7789613848202397, \"MicroF1\": 0.7789613848202397, \"MacroF1\": 0.7667173742344939, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1092.1021469999998 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7781644193127447, \"MicroF1\": 0.7781644193127447, \"MacroF1\": 0.7659138381656089, \"Memory in Mb\": 4.132749557495117, \"Time in s\": 1135.1554539999995 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6218009478672986, \"MicroF1\": 0.6218009478672986, \"MacroF1\": 0.5857016652718547, \"Memory in Mb\": 6.522056579589844, \"Time in s\": 30.289418 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6196115585030791, \"MicroF1\": 0.6196115585030791, \"MacroF1\": 0.5856756432415232, \"Memory in Mb\": 10.389650344848633, \"Time in s\": 88.669185 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.628986422481844, \"MicroF1\": 0.628986422481844, \"MacroF1\": 0.5949930595607558, \"Memory in Mb\": 19.16711711883545, \"Time in s\": 174.550284 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6294103717736207, \"MicroF1\": 0.6294103717736207, \"MacroF1\": 0.5952675443708706, \"Memory in Mb\": 19.668034553527832, \"Time in s\": 287.918965 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6364841826103429, \"MicroF1\": 0.6364841826103429, \"MacroF1\": 0.5994911272790604, \"Memory in Mb\": 18.96163558959961, \"Time in s\": 428.854975 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6352012628255722, \"MicroF1\": 0.6352012628255722, \"MacroF1\": 0.5993891820807257, \"Memory in Mb\": 20.14603328704834, \"Time in s\": 597.190787 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.638749830875389, \"MicroF1\": 0.638749830875389, \"MacroF1\": 0.6030343276880051, \"Memory in Mb\": 21.10132884979248, \"Time in s\": 793.049033 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6405824553095774, \"MicroF1\": 0.6405824553095774, \"MacroF1\": 0.6028521616895871, \"Memory in Mb\": 24.15276908874512, \"Time in s\": 1016.521492 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6449542249815847, \"MicroF1\": 0.6449542249815847, \"MacroF1\": 0.6055705492028415, \"Memory in Mb\": 24.86981773376465, \"Time in s\": 1266.726445 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6485462638507434, \"MicroF1\": 0.6485462638507434, \"MacroF1\": 0.6081614166360886, \"Memory in Mb\": 
28.971991539001465, \"Time in s\": 1544.137884 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6490744726646578, \"MicroF1\": 0.6490744726646578, \"MacroF1\": 0.6078786452761632, \"Memory in Mb\": 31.018654823303223, \"Time in s\": 1848.95866 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6514876489621971, \"MicroF1\": 0.6514876489621971, \"MacroF1\": 0.6111938480023121, \"Memory in Mb\": 35.39500713348389, \"Time in s\": 2179.477442 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6707947840023312, \"MicroF1\": 0.6707947840023312, \"MacroF1\": 0.6607574394823456, \"Memory in Mb\": 17.66313648223877, \"Time in s\": 2527.754593 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814584319826829, \"MicroF1\": 0.6814584319826829, \"MacroF1\": 0.6724584381879511, \"Memory in Mb\": 11.128533363342283, \"Time in s\": 2896.354015 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6762421870067554, \"MicroF1\": 0.6762421870067554, \"MacroF1\": 0.6688785181435096, \"Memory in Mb\": 14.811795234680176, \"Time in s\": 3290.113809 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6741639538324948, \"MicroF1\": 0.6741639538324948, \"MacroF1\": 0.6676833597101233, \"Memory in Mb\": 15.36542510986328, \"Time in s\": 3710.779607 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.670491894601972, \"MicroF1\": 0.670491894601972, \"MacroF1\": 0.6643621029883554, \"Memory in Mb\": 15.98740005493164, \"Time in s\": 4158.727334 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6754353659178197, \"MicroF1\": 0.6754353659178197, \"MacroF1\": 0.6656526175716114, \"Memory in Mb\": 17.100504875183105, \"Time in s\": 4627.925426 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6800079748791308, \"MicroF1\": 0.6800079748791308, \"MacroF1\": 0.6670489534490986, \"Memory in Mb\": 26.370519638061523, \"Time in s\": 5118.761149 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6835550925706709, \"MicroF1\": 0.6835550925706709, \"MacroF1\": 0.6685883462655132, \"Memory in Mb\": 32.78877353668213, \"Time in s\": 5635.605831 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6869447576099211, \"MicroF1\": 0.6869447576099211, \"MacroF1\": 0.6701495347804184, \"Memory in Mb\": 36.29740715026856, \"Time in s\": 6178.169973 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6912745899875167, \"MicroF1\": 0.6912745899875167, \"MacroF1\": 0.6726358783249661, \"Memory in Mb\": 38.26123523712158, \"Time in s\": 6746.46947 }, { \"step\": 24288, \"track\": 
\"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6940338452670153, \"MicroF1\": 0.6940338452670153, \"MacroF1\": 0.673442702110033, \"Memory in Mb\": 39.100372314453125, \"Time in s\": 7340.085466 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6976679951071302, \"MicroF1\": 0.6976679951071302, \"MacroF1\": 0.67525701759611, \"Memory in Mb\": 42.24958515167236, \"Time in s\": 7958.602362 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7000643963786507, \"MicroF1\": 0.7000643963786507, \"MacroF1\": 0.6759116206749555, \"Memory in Mb\": 41.52747917175293, \"Time in s\": 8602.02664 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7027135312329266, \"MicroF1\": 0.7027135312329266, \"MacroF1\": 0.6765494742782628, \"Memory in Mb\": 43.56198120117188, \"Time in s\": 9269.998307 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7018343797130931, \"MicroF1\": 0.7018343797130931, \"MacroF1\": 0.6771545550561098, \"Memory in Mb\": 24.23386573791504, \"Time in s\": 9962.817287 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7013224202658369, \"MicroF1\": 0.7013224202658369, \"MacroF1\": 0.681362451564682, \"Memory in Mb\": 5.156903266906738, \"Time in s\": 10676.641313 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.699702837736342, \"MicroF1\": 0.699702837736342, \"MacroF1\": 0.6839521261644582, \"Memory in Mb\": 8.359548568725586, \"Time in s\": 11409.958608 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6993907635973358, \"MicroF1\": 0.6993907635973358, \"MacroF1\": 0.6874853197903658, \"Memory in Mb\": 12.837088584899902, \"Time in s\": 12162.299998 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.7005651443409195, \"MicroF1\": 0.7005651443409195, \"MacroF1\": 0.692127614099415, \"Memory in Mb\": 14.392640113830566, \"Time in s\": 12933.190651 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6971678849397769, \"MicroF1\": 0.6971678849397769, \"MacroF1\": 0.6903104823999882, \"Memory in Mb\": 22.11440753936768, \"Time in s\": 13726.605059 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6941487072057853, \"MicroF1\": 0.6941487072057853, \"MacroF1\": 0.6871648754350796, \"Memory in Mb\": 16.369569778442383, \"Time in s\": 14546.496494 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6917527783193606, \"MicroF1\": 0.6917527783193606, \"MacroF1\": 0.684473708604621, \"Memory in Mb\": 15.783265113830566, \"Time in s\": 15393.169285 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": 
\"Insects\", \"Accuracy\": 0.6883303119673151, \"MicroF1\": 0.6883303119673151, \"MacroF1\": 0.6807777972894504, \"Memory in Mb\": 18.195876121521, \"Time in s\": 16266.308937 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6865973957648297, \"MicroF1\": 0.6865973957648297, \"MacroF1\": 0.6786744939637405, \"Memory in Mb\": 21.092598915100098, \"Time in s\": 17165.821217 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6857259860254409, \"MicroF1\": 0.6857259860254409, \"MacroF1\": 0.6778492437957201, \"Memory in Mb\": 16.29904079437256, \"Time in s\": 18090.697656 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6837540807934807, \"MicroF1\": 0.6837540807934807, \"MacroF1\": 0.6766238977666043, \"Memory in Mb\": 13.538718223571776, \"Time in s\": 19041.492123 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6814462278124469, \"MicroF1\": 0.6814462278124469, \"MacroF1\": 0.675074837604149, \"Memory in Mb\": 15.844508171081545, \"Time in s\": 20015.794843 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6790643717891048, \"MicroF1\": 0.6790643717891048, \"MacroF1\": 0.6733686277261395, \"Memory in Mb\": 15.962260246276855, \"Time in s\": 21014.495285 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6762443700196328, \"MicroF1\": 0.6762443700196328, \"MacroF1\": 0.6713719096586489, \"Memory in Mb\": 17.128825187683105, \"Time in s\": 22038.291411 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6738066785416338, \"MicroF1\": 0.6738066785416338, \"MacroF1\": 0.6696205967919768, \"Memory in Mb\": 16.462289810180664, \"Time in s\": 23087.153485000003 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6717686700288502, \"MicroF1\": 0.6717686700288502, \"MacroF1\": 0.6680705737277651, \"Memory in Mb\": 17.22057342529297, \"Time in s\": 24160.866409 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6708994253492025, \"MicroF1\": 0.6708994253492025, \"MacroF1\": 0.6677330044499646, \"Memory in Mb\": 17.752578735351562, \"Time in s\": 25258.281990000003 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6729939603106126, \"MicroF1\": 0.6729939603106126, \"MacroF1\": 0.6699611714455135, \"Memory in Mb\": 18.93515110015869, \"Time in s\": 26380.044308000004 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6767061245496655, \"MicroF1\": 0.6767061245496655, \"MacroF1\": 0.6733691077464542, \"Memory in Mb\": 20.549713134765625, \"Time in s\": 27525.907792000005 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 
0.6807237412101308, \"MicroF1\": 0.6807237412101308, \"MacroF1\": 0.6769109137483648, \"Memory in Mb\": 20.974443435668945, \"Time in s\": 28695.60352400001 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6845147671000453, \"MicroF1\": 0.6845147671000453, \"MacroF1\": 0.6800104952374638, \"Memory in Mb\": 22.97932243347168, \"Time in s\": 29888.26411600001 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6885182536768258, \"MicroF1\": 0.6885182536768258, \"MacroF1\": 0.6832561756017089, \"Memory in Mb\": 24.11430263519287, \"Time in s\": 31103.175398000007 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Insects\", \"Accuracy\": 0.6915471883937195, \"MicroF1\": 0.6915471883937195, \"MacroF1\": 0.6864107325641782, \"Memory in Mb\": 18.141328811645508, \"Time in s\": 32334.061725000007 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 2.238800048828125, \"Time in s\": 2.971982 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9460122699386504, \"MicroF1\": 0.9460122699386504, \"MacroF1\": 0.8367492469040564, \"Memory in Mb\": 4.44326114654541, \"Time in s\": 9.847778 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9411283728536386, \"MicroF1\": 0.9411283728536386, \"MacroF1\": 0.9276213812296338, \"Memory in Mb\": 6.153376579284668, \"Time in s\": 20.932147 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.950337216431637, \"MicroF1\": 0.950337216431637, \"MacroF1\": 0.9330502878949444, \"Memory in Mb\": 7.8991851806640625, \"Time in s\": 36.717851 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9494850416871016, \"MicroF1\": 0.9494850416871016, \"MacroF1\": 0.932928877406915, \"Memory in Mb\": 10.965654373168944, \"Time in s\": 57.922702 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9525950143032283, \"MicroF1\": 0.9525950143032283, \"MacroF1\": 0.9502305130509756, \"Memory in Mb\": 10.694184303283691, \"Time in s\": 83.450682 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9544658493870404, \"MicroF1\": 0.9544658493870404, \"MacroF1\": 0.943855127765724, \"Memory in Mb\": 15.53213119506836, \"Time in s\": 113.412194 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9515783021759118, \"MicroF1\": 0.9515783021759118, \"MacroF1\": 0.944582727256988, \"Memory in Mb\": 15.652314186096191, \"Time in s\": 149.290909 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9526014709888314, \"MicroF1\": 0.9526014709888314, \"MacroF1\": 
0.9497542235388344, \"Memory in Mb\": 16.11695098876953, \"Time in s\": 190.984607 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9499877420936504, \"MicroF1\": 0.9499877420936504, \"MacroF1\": 0.9391633661003512, \"Memory in Mb\": 16.578293800354004, \"Time in s\": 238.599211 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9474036104301314, \"MicroF1\": 0.9474036104301314, \"MacroF1\": 0.9496969875723204, \"Memory in Mb\": 13.190230369567873, \"Time in s\": 292.48392399999994 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9493360572012256, \"MicroF1\": 0.9493360572012256, \"MacroF1\": 0.9494027577495958, \"Memory in Mb\": 12.864276885986328, \"Time in s\": 353.18946299999993 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.951159720912691, \"MicroF1\": 0.951159720912691, \"MacroF1\": 0.9518992835106976, \"Memory in Mb\": 11.604743957519531, \"Time in s\": 419.932067 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.951146909472947, \"MicroF1\": 0.951146909472947, \"MacroF1\": 0.9505351682914018, \"Memory in Mb\": 13.577879905700684, \"Time in s\": 492.702395 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9478672985781992, \"MicroF1\": 0.9478672985781992, \"MacroF1\": 0.9429356622084736, \"Memory in Mb\": 16.35688304901123, \"Time in s\": 572.32886 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9480618967366324, \"MicroF1\": 0.9480618967366324, \"MacroF1\": 0.9478348775735732, \"Memory in Mb\": 10.670846939086914, \"Time in s\": 659.058815 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9495313626532084, \"MicroF1\": 0.9495313626532084, \"MacroF1\": 0.9511497142125284, \"Memory in Mb\": 10.614124298095703, \"Time in s\": 751.777349 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9500204276181398, \"MicroF1\": 0.9500204276181398, \"MacroF1\": 0.9502583235097112, \"Memory in Mb\": 12.824416160583496, \"Time in s\": 851.689186 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9505870210295446, \"MicroF1\": 0.9505870210295446, \"MacroF1\": 0.9508630550075082, \"Memory in Mb\": 11.99438190460205, \"Time in s\": 957.810381 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9487682314009068, \"MicroF1\": 0.9487682314009068, \"MacroF1\": 0.9466937008923912, \"Memory in Mb\": 16.34542465209961, \"Time in s\": 1069.487963 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9491070386366288, \"MicroF1\": 0.9491070386366288, \"MacroF1\": 0.9496258519963297, \"Memory in Mb\": 14.096193313598633, \"Time 
in s\": 1187.33597 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.95041782729805, \"MicroF1\": 0.95041782729805, \"MacroF1\": 0.95112303337496, \"Memory in Mb\": 8.487105369567871, \"Time in s\": 1310.539589 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9507620164126612, \"MicroF1\": 0.9507620164126612, \"MacroF1\": 0.9509680125568912, \"Memory in Mb\": 10.491826057434082, \"Time in s\": 1439.276187 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9505668471044836, \"MicroF1\": 0.9505668471044836, \"MacroF1\": 0.9508008066421794, \"Memory in Mb\": 12.578843116760254, \"Time in s\": 1574.391157 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9496029022453182, \"MicroF1\": 0.9496029022453182, \"MacroF1\": 0.9490825188137642, \"Memory in Mb\": 15.329971313476562, \"Time in s\": 1716.648577 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9462619025172057, \"MicroF1\": 0.9462619025172057, \"MacroF1\": 0.9448381382156612, \"Memory in Mb\": 14.149526596069336, \"Time in s\": 1865.207745 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.94734453018611, \"MicroF1\": 0.94734453018611, \"MacroF1\": 0.9480489849360164, \"Memory in Mb\": 15.43016529083252, \"Time in s\": 2018.851918 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9476494791210716, \"MicroF1\": 0.9476494791210716, \"MacroF1\": 0.947763256048792, \"Memory in Mb\": 19.33940696716309, \"Time in s\": 2178.069416 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9466655396838812, \"MicroF1\": 0.9466655396838812, \"MacroF1\": 0.9465646854570324, \"Memory in Mb\": 20.836685180664062, \"Time in s\": 2343.241513 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9473813220034316, \"MicroF1\": 0.9473813220034316, \"MacroF1\": 0.9477056335712672, \"Memory in Mb\": 22.594761848449707, \"Time in s\": 2516.191517 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9481299913022851, \"MicroF1\": 0.9481299913022851, \"MacroF1\": 0.9484695727303012, \"Memory in Mb\": 17.12001132965088, \"Time in s\": 2696.111746 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9465338950593642, \"MicroF1\": 0.9465338950593642, \"MacroF1\": 0.9461537407653536, \"Memory in Mb\": 16.317899703979492, \"Time in s\": 2881.285632 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947559979202258, \"MicroF1\": 0.947559979202258, \"MacroF1\": 0.9479124389900307, \"Memory in Mb\": 17.175325393676758, \"Time in s\": 3071.301855 }, { \"step\": 13872, \"track\": \"Multiclass 
classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9480931439694328, \"MicroF1\": 0.9480931439694328, \"MacroF1\": 0.9483129032895908, \"Memory in Mb\": 20.13454818725586, \"Time in s\": 3267.097668 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.947265214650886, \"MicroF1\": 0.9472652146508858, \"MacroF1\": 0.9472495958535088, \"Memory in Mb\": 23.271190643310547, \"Time in s\": 3470.238619 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9470960713556206, \"MicroF1\": 0.9470960713556206, \"MacroF1\": 0.9472715831304288, \"Memory in Mb\": 24.70554256439209, \"Time in s\": 3681.555614 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9477310367671414, \"MicroF1\": 0.9477310367671414, \"MacroF1\": 0.948023523282346, \"Memory in Mb\": 12.28943920135498, \"Time in s\": 3901.326497 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9481390698574468, \"MicroF1\": 0.9481390698574468, \"MacroF1\": 0.9483821660022894, \"Memory in Mb\": 8.167351722717285, \"Time in s\": 4127.852482 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9489661240651122, \"MicroF1\": 0.9489661240651122, \"MacroF1\": 0.949259317367439, \"Memory in Mb\": 10.055158615112305, \"Time in s\": 4359.152379 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9489552055885776, \"MicroF1\": 0.9489552055885776, \"MacroF1\": 0.949102505659295, \"Memory in Mb\": 8.927614212036133, \"Time in s\": 4595.973694 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.948645901835356, \"MicroF1\": 0.948645901835356, \"MacroF1\": 0.9487532899546076, \"Memory in Mb\": 9.772565841674805, \"Time in s\": 4838.16456 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9491683688357164, \"MicroF1\": 0.9491683688357164, \"MacroF1\": 0.9493664270655614, \"Memory in Mb\": 8.885259628295898, \"Time in s\": 5086.602422 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9497235364532862, \"MicroF1\": 0.9497235364532862, \"MacroF1\": 0.9498997400720456, \"Memory in Mb\": 7.432655334472656, \"Time in s\": 5340.103049 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9492507381204388, \"MicroF1\": 0.9492507381204388, \"MacroF1\": 0.94932994822919, \"Memory in Mb\": 6.564939498901367, \"Time in s\": 5598.267976 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9498883381447792, \"MicroF1\": 0.949888338144779, \"MacroF1\": 0.9500369712738612, \"Memory in Mb\": 9.445448875427246, \"Time in s\": 5861.742373 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.950125219800714, \"MicroF1\": 0.950125219800714, \"MacroF1\": 0.950244810756275, \"Memory in Mb\": 8.985580444335938, \"Time in s\": 6130.741927 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9502477183833116, \"MicroF1\": 0.9502477183833116, \"MacroF1\": 0.950357710715448, \"Memory in Mb\": 10.539984703063965, \"Time in s\": 6405.708121 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9504672419956084, \"MicroF1\": 0.9504672419956084, \"MacroF1\": 0.9505675543483478, \"Memory in Mb\": 12.433100700378418, \"Time in s\": 6686.90918 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9504777149717372, \"MicroF1\": 0.9504777149717372, \"MacroF1\": 0.95056596570352, \"Memory in Mb\": 12.245397567749023, \"Time in s\": 6973.48828 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Leveraging Bagging\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.950389724986519, \"MicroF1\": 0.950389724986519, \"MacroF1\": 0.9504675266923704, \"Memory in Mb\": 10.420146942138672, \"Time in s\": 7265.024106 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4, \"MicroF1\": 0.4000000000000001, \"MacroF1\": 0.3289160825620571, \"Memory in Mb\": 1.8703498840332031, \"Time in s\": 0.761019 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5494505494505495, \"MicroF1\": 0.5494505494505495, \"MacroF1\": 0.5607526488856412, \"Memory in Mb\": 2.0432376861572266, \"Time in s\": 2.058459 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.5620437956204379, \"MicroF1\": 0.5620437956204379, \"MacroF1\": 0.5814352652080846, \"Memory in Mb\": 2.2601184844970703, \"Time in s\": 3.877596 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6174863387978142, \"MicroF1\": 0.6174863387978142, \"MacroF1\": 0.6349823285289026, \"Memory in Mb\": 2.5773630142211914, \"Time in s\": 5.988367 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6550218340611353, \"MicroF1\": 0.6550218340611353, \"MacroF1\": 0.6697464616246889, \"Memory in Mb\": 2.673569679260254, \"Time in s\": 8.36838 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.68, \"MicroF1\": 0.68, \"MacroF1\": 0.6977451412884614, \"Memory in Mb\": 2.705929756164551, \"Time in s\": 11.015152 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7040498442367601, \"MicroF1\": 0.7040498442367601, \"MacroF1\": 0.708655608864303, \"Memory in Mb\": 2.747677803039551, \"Time in s\": 13.922748 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7302452316076294, \"MicroF1\": 0.7302452316076294, \"MacroF1\": 0.731555248839775, \"Memory in Mb\": 2.91958236694336, \"Time in s\": 17.085856 }, { \"step\": 414, 
\"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7481840193704601, \"MicroF1\": 0.7481840193704601, \"MacroF1\": 0.7498869297449521, \"Memory in Mb\": 3.2087087631225586, \"Time in s\": 20.515594 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7429193899782135, \"MicroF1\": 0.7429193899782135, \"MacroF1\": 0.7431113090395209, \"Memory in Mb\": 2.874252319335937, \"Time in s\": 24.226579 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7465346534653465, \"MicroF1\": 0.7465346534653465, \"MacroF1\": 0.7453691625646783, \"Memory in Mb\": 3.051929473876953, \"Time in s\": 28.205653 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7531760435571688, \"MicroF1\": 0.7531760435571688, \"MacroF1\": 0.7537204076398122, \"Memory in Mb\": 3.133829116821289, \"Time in s\": 32.454626000000005 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7587939698492462, \"MicroF1\": 0.7587939698492462, \"MacroF1\": 0.7612399908296416, \"Memory in Mb\": 3.14900016784668, \"Time in s\": 36.970245000000006 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7589424572317263, \"MicroF1\": 0.7589424572317262, \"MacroF1\": 0.7628637146980985, \"Memory in Mb\": 3.4707136154174805, \"Time in s\": 41.75427400000001 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7619738751814223, \"MicroF1\": 0.7619738751814223, \"MacroF1\": 0.76530464273308, \"Memory in Mb\": 3.455944061279297, \"Time in s\": 46.80492900000001 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7687074829931972, \"MicroF1\": 0.7687074829931972, \"MacroF1\": 0.7727990926768868, \"Memory in Mb\": 3.680045127868652, \"Time in s\": 52.11999800000001 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7733674775928298, \"MicroF1\": 0.7733674775928298, \"MacroF1\": 0.7767963295410655, \"Memory in Mb\": 3.8801565170288086, \"Time in s\": 57.706528000000006 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7738814993954051, \"MicroF1\": 0.7738814993954051, \"MacroF1\": 0.7787678467755003, \"Memory in Mb\": 3.867655754089356, \"Time in s\": 63.567513000000005 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7812142038946163, \"MicroF1\": 0.7812142038946163, \"MacroF1\": 0.7848289172220594, \"Memory in Mb\": 3.691183090209961, \"Time in s\": 69.704029 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7878128400435256, \"MicroF1\": 0.7878128400435256, \"MacroF1\": 0.7905661589338376, \"Memory in Mb\": 3.770216941833496, \"Time in s\": 76.10799700000001 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 
0.7917098445595855, \"MicroF1\": 0.7917098445595855, \"MacroF1\": 0.7936972979049142, \"Memory in Mb\": 3.8226003646850586, \"Time in s\": 82.78424000000001 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7952522255192879, \"MicroF1\": 0.7952522255192878, \"MacroF1\": 0.796484514345152, \"Memory in Mb\": 4.098711967468262, \"Time in s\": 89.73293500000001 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8032166508987701, \"MicroF1\": 0.8032166508987703, \"MacroF1\": 0.8038465931831994, \"Memory in Mb\": 4.173297882080078, \"Time in s\": 96.952593 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8041704442429737, \"MicroF1\": 0.8041704442429737, \"MacroF1\": 0.8051724065917674, \"Memory in Mb\": 4.39574146270752, \"Time in s\": 104.442345 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8102697998259356, \"MicroF1\": 0.8102697998259357, \"MacroF1\": 0.8109646011887589, \"Memory in Mb\": 4.552497863769531, \"Time in s\": 112.202361 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8142259414225942, \"MicroF1\": 0.8142259414225941, \"MacroF1\": 0.8149917549940485, \"Memory in Mb\": 4.571473121643066, \"Time in s\": 120.234064 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8186946011281225, \"MicroF1\": 0.8186946011281225, \"MacroF1\": 0.8196592056494876, \"Memory in Mb\": 4.626148223876953, \"Time in s\": 128.53886899999998 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8212898212898213, \"MicroF1\": 0.8212898212898213, \"MacroF1\": 0.822176577441966, \"Memory in Mb\": 5.001523017883301, \"Time in s\": 137.11683999999997 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8229557389347337, \"MicroF1\": 0.8229557389347337, \"MacroF1\": 0.8237863794336502, \"Memory in Mb\": 5.142135620117188, \"Time in s\": 145.97222099999996 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8245105148658448, \"MicroF1\": 0.8245105148658448, \"MacroF1\": 0.8256018780761997, \"Memory in Mb\": 5.339564323425293, \"Time in s\": 155.11012299999996 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8287719298245614, \"MicroF1\": 0.8287719298245614, \"MacroF1\": 0.8290084946618356, \"Memory in Mb\": 5.337568283081055, \"Time in s\": 164.53666099999995 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8334466349422162, \"MicroF1\": 0.8334466349422162, \"MacroF1\": 0.8325983603187124, \"Memory in Mb\": 5.299435615539551, \"Time in s\": 174.25653999999994 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8358602504943968, \"MicroF1\": 0.8358602504943968, \"MacroF1\": 0.8344617749849152, \"Memory in 
Mb\": 5.345264434814453, \"Time in s\": 184.26794299999997 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8387715930902111, \"MicroF1\": 0.8387715930902111, \"MacroF1\": 0.837784263767798, \"Memory in Mb\": 4.9267168045043945, \"Time in s\": 194.57830699999997 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.839030453697949, \"MicroF1\": 0.839030453697949, \"MacroF1\": 0.838065870841574, \"Memory in Mb\": 4.685762405395508, \"Time in s\": 205.19165999999996 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8416918429003021, \"MicroF1\": 0.8416918429003022, \"MacroF1\": 0.8408915736149335, \"Memory in Mb\": 4.737677574157715, \"Time in s\": 216.09684699999997 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8418577307466196, \"MicroF1\": 0.8418577307466195, \"MacroF1\": 0.8423710518418951, \"Memory in Mb\": 4.180461883544922, \"Time in s\": 227.29100499999996 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8431597023468803, \"MicroF1\": 0.8431597023468804, \"MacroF1\": 0.8432643493367186, \"Memory in Mb\": 4.331151962280273, \"Time in s\": 238.7660419999999 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8455103179029559, \"MicroF1\": 0.8455103179029559, \"MacroF1\": 0.8449435902582664, \"Memory in Mb\": 4.424459457397461, \"Time in s\": 250.52640599999992 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8466557911908646, \"MicroF1\": 0.8466557911908648, \"MacroF1\": 0.8462222022075542, \"Memory in Mb\": 4.3217973709106445, \"Time in s\": 262.5845499999999 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8477453580901857, \"MicroF1\": 0.8477453580901856, \"MacroF1\": 0.84772474367672, \"Memory in Mb\": 4.364754676818848, \"Time in s\": 274.94178099999993 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8487830139823925, \"MicroF1\": 0.8487830139823925, \"MacroF1\": 0.8484572581714136, \"Memory in Mb\": 4.410244941711426, \"Time in s\": 287.5965719999999 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.849772382397572, \"MicroF1\": 0.849772382397572, \"MacroF1\": 0.8495372758679525, \"Memory in Mb\": 4.436578750610352, \"Time in s\": 300.5458569999999 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8507167572911517, \"MicroF1\": 0.8507167572911517, \"MacroF1\": 0.8496927624131454, \"Memory in Mb\": 4.292850494384766, \"Time in s\": 313.7872069999999 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8511358144030933, \"MicroF1\": 0.8511358144030933, \"MacroF1\": 0.8503705992191455, \"Memory in Mb\": 4.422323226928711, \"Time in s\": 327.3192999999999 }, { \"step\": 
2116, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8515366430260047, \"MicroF1\": 0.8515366430260047, \"MacroF1\": 0.850305284692234, \"Memory in Mb\": 4.440757751464844, \"Time in s\": 341.1384159999999 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8505321610365572, \"MicroF1\": 0.850532161036557, \"MacroF1\": 0.84908675540822, \"Memory in Mb\": 4.611151695251465, \"Time in s\": 355.24865599999987 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.851835070231083, \"MicroF1\": 0.851835070231083, \"MacroF1\": 0.8501011345319502, \"Memory in Mb\": 4.809813499450684, \"Time in s\": 369.64566299999984 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8490901020861075, \"MicroF1\": 0.8490901020861075, \"MacroF1\": 0.847799327251759, \"Memory in Mb\": 4.949430465698242, \"Time in s\": 384.3275549999999 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8490648107872988, \"MicroF1\": 0.8490648107872988, \"MacroF1\": 0.8479218608351832, \"Memory in Mb\": 5.295671463012695, \"Time in s\": 399.2890019999999 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6454976303317536, \"MicroF1\": 0.6454976303317536, \"MacroF1\": 0.5867724425586438, \"Memory in Mb\": 7.441350936889648, \"Time in s\": 8.583896 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6826148744670772, \"MicroF1\": 0.6826148744670772, \"MacroF1\": 0.6053874539212664, \"Memory in Mb\": 11.234929084777832, \"Time in s\": 25.378115 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6896116198294916, \"MicroF1\": 0.6896116198294916, \"MacroF1\": 0.6083758872885286, \"Memory in Mb\": 13.40891933441162, \"Time in s\": 50.237849 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.6954771489462468, \"MicroF1\": 0.6954771489462468, \"MacroF1\": 0.6085129807470798, \"Memory in Mb\": 18.096717834472656, \"Time in s\": 83.102519 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7014586095851487, \"MicroF1\": 0.7014586095851487, \"MacroF1\": 0.6122692721162352, \"Memory in Mb\": 21.446727752685547, \"Time in s\": 123.914838 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7021310181531176, \"MicroF1\": 0.7021310181531176, \"MacroF1\": 0.6116513676781078, \"Memory in Mb\": 27.182113647460938, \"Time in s\": 172.776504 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7054525774590719, \"MicroF1\": 0.7054525774590719, \"MacroF1\": 0.6129808753663538, \"Memory in Mb\": 26.876797676086422, \"Time in s\": 229.478895 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.70853557476027, \"MicroF1\": 0.70853557476027, 
\"MacroF1\": 0.6147213044531655, \"Memory in Mb\": 31.851184844970703, \"Time in s\": 293.916345 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7137745974955277, \"MicroF1\": 0.7137745974955277, \"MacroF1\": 0.6175531178778296, \"Memory in Mb\": 22.848219871521, \"Time in s\": 366.230765 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7174921867601098, \"MicroF1\": 0.7174921867601098, \"MacroF1\": 0.619713417782018, \"Memory in Mb\": 19.317788124084476, \"Time in s\": 446.248603 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.717606543263022, \"MicroF1\": 0.717606543263022, \"MacroF1\": 0.618960125586482, \"Memory in Mb\": 19.568995475769043, \"Time in s\": 533.7141509999999 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7184121221687317, \"MicroF1\": 0.7184121221687317, \"MacroF1\": 0.6302774396409263, \"Memory in Mb\": 23.59817409515381, \"Time in s\": 628.5255009999998 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373060391928317, \"MicroF1\": 0.7373060391928317, \"MacroF1\": 0.7337291247132964, \"Memory in Mb\": 6.318717002868652, \"Time in s\": 729.5138609999999 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.744774403030508, \"MicroF1\": 0.7447744030305079, \"MacroF1\": 0.7439388578060665, \"Memory in Mb\": 4.747281074523926, \"Time in s\": 836.7314339999999 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7374834269840268, \"MicroF1\": 0.7374834269840268, \"MacroF1\": 0.7388535634976899, \"Memory in Mb\": 8.635688781738281, \"Time in s\": 951.47119 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7324652263983427, \"MicroF1\": 0.7324652263983427, \"MacroF1\": 0.736003592775451, \"Memory in Mb\": 13.742908477783203, \"Time in s\": 1073.59215 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7253077822962509, \"MicroF1\": 0.7253077822962509, \"MacroF1\": 0.7305072565778182, \"Memory in Mb\": 20.00777053833008, \"Time in s\": 1203.171307 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7387804493081497, \"MicroF1\": 0.7387804493081497, \"MacroF1\": 0.7395324944779035, \"Memory in Mb\": 6.087196350097656, \"Time in s\": 1339.825862 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7439066939141704, \"MicroF1\": 0.7439066939141704, \"MacroF1\": 0.7399287274487314, \"Memory in Mb\": 7.841000556945801, \"Time in s\": 1483.477757 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7456792461764288, \"MicroF1\": 0.7456792461764288, \"MacroF1\": 0.738136498436516, \"Memory in Mb\": 11.804688453674316, \"Time in s\": 1634.941518 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.7464712514092446, \"MicroF1\": 0.7464712514092445, \"MacroF1\": 0.7355899333520025, \"Memory in Mb\": 17.346091270446777, \"Time in s\": 1793.971452 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7486548146872714, \"MicroF1\": 0.7486548146872714, \"MacroF1\": 0.7347795423630049, \"Memory in Mb\": 20.6541748046875, \"Time in s\": 1960.617459 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7502367521719439, \"MicroF1\": 0.7502367521719437, \"MacroF1\": 0.7334324471857778, \"Memory in Mb\": 21.727876663208008, \"Time in s\": 2134.847723 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7523576530008287, \"MicroF1\": 0.7523576530008288, \"MacroF1\": 0.7330792890892175, \"Memory in Mb\": 28.11653995513916, \"Time in s\": 2316.991759 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7535891511042085, \"MicroF1\": 0.7535891511042085, \"MacroF1\": 0.731955812013067, \"Memory in Mb\": 28.993709564208984, \"Time in s\": 2507.189995 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7540338736113641, \"MicroF1\": 0.7540338736113641, \"MacroF1\": 0.7298780765329144, \"Memory in Mb\": 35.52100467681885, \"Time in s\": 2705.597193 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7522710532776823, \"MicroF1\": 0.7522710532776823, \"MacroF1\": 0.7301216768723076, \"Memory in Mb\": 19.597237586975098, \"Time in s\": 2912.282888 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7502621165488551, \"MicroF1\": 0.7502621165488552, \"MacroF1\": 0.733319854895679, \"Memory in Mb\": 15.064599990844728, \"Time in s\": 3126.360865 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7500244913953564, \"MicroF1\": 0.7500244913953564, \"MacroF1\": 0.7381499467352403, \"Memory in Mb\": 21.998522758483887, \"Time in s\": 3348.070871 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7493292086240096, \"MicroF1\": 0.7493292086240096, \"MacroF1\": 0.7414716120706107, \"Memory in Mb\": 28.99252319335937, \"Time in s\": 3577.2433 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7494424927447686, \"MicroF1\": 0.7494424927447686, \"MacroF1\": 0.7447602446394828, \"Memory in Mb\": 36.39131259918213, \"Time in s\": 3813.876585 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7448137077920156, \"MicroF1\": 0.7448137077920156, \"MacroF1\": 0.7415559043607837, \"Memory in Mb\": 7.406244277954102, \"Time in s\": 4059.132247 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7397193445633771, \"MicroF1\": 0.739719344563377, \"MacroF1\": 0.7363475181006618, \"Memory in Mb\": 8.795232772827148, \"Time in s\": 4312.474973 }, { \"step\": 
35904, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7365679748210456, \"MicroF1\": 0.7365679748210455, \"MacroF1\": 0.7329849736783064, \"Memory in Mb\": 11.10138702392578, \"Time in s\": 4573.597154 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7330014340214832, \"MicroF1\": 0.7330014340214832, \"MacroF1\": 0.7293557861681861, \"Memory in Mb\": 15.830912590026855, \"Time in s\": 4842.477951 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7302643693278968, \"MicroF1\": 0.7302643693278967, \"MacroF1\": 0.7264691718738406, \"Memory in Mb\": 19.795815467834476, \"Time in s\": 5119.095735 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7309513449873307, \"MicroF1\": 0.7309513449873307, \"MacroF1\": 0.7270525503986339, \"Memory in Mb\": 9.05908489227295, \"Time in s\": 5403.358583 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.729284521643781, \"MicroF1\": 0.729284521643781, \"MacroF1\": 0.7256952486493923, \"Memory in Mb\": 11.242535591125488, \"Time in s\": 5695.769582 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7294029089672923, \"MicroF1\": 0.7294029089672922, \"MacroF1\": 0.7260996194485368, \"Memory in Mb\": 9.506610870361328, \"Time in s\": 5995.177374 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7298941736310045, \"MicroF1\": 0.7298941736310045, \"MacroF1\": 0.7269475794208268, \"Memory in Mb\": 15.258689880371094, \"Time in s\": 6301.374087 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7306848365862109, \"MicroF1\": 0.7306848365862109, \"MacroF1\": 0.7280100891072271, \"Memory in Mb\": 20.2097225189209, \"Time in s\": 6614.284978 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7312574688282113, \"MicroF1\": 0.7312574688282113, \"MacroF1\": 0.7287466644577517, \"Memory in Mb\": 26.378506660461422, \"Time in s\": 6934.278340999999 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7317814433897857, \"MicroF1\": 0.7317814433897857, \"MacroF1\": 0.7291491859846939, \"Memory in Mb\": 32.061384201049805, \"Time in s\": 7261.498196999999 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.732776617954071, \"MicroF1\": 0.732776617954071, \"MacroF1\": 0.7299865007540453, \"Memory in Mb\": 32.25613784790039, \"Time in s\": 7595.964266999999 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7334329426124288, \"MicroF1\": 0.7334329426124286, \"MacroF1\": 0.7309449816547512, \"Memory in Mb\": 16.162960052490234, \"Time in s\": 7937.466337999999 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7373751930005147, \"MicroF1\": 0.7373751930005147, \"MacroF1\": 
0.7352697035426822, \"Memory in Mb\": 12.93554973602295, \"Time in s\": 8285.412244 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.741210130765046, \"MicroF1\": 0.741210130765046, \"MacroF1\": 0.739269872700679, \"Memory in Mb\": 15.798639297485352, \"Time in s\": 8639.863329999998 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7446682581332491, \"MicroF1\": 0.7446682581332491, \"MacroF1\": 0.7426657147430288, \"Memory in Mb\": 14.252553939819336, \"Time in s\": 9000.792304999999 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.7485650232881742, \"MicroF1\": 0.7485650232881743, \"MacroF1\": 0.7463959215624629, \"Memory in Mb\": 16.087495803833008, \"Time in s\": 9368.035475 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Insects\", \"Accuracy\": 0.752154396863577, \"MicroF1\": 0.752154396863577, \"MacroF1\": 0.7502511872752614, \"Memory in Mb\": 11.339015007019045, \"Time in s\": 9741.135739 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803439803439804, \"MicroF1\": 0.9803439803439804, \"MacroF1\": 0.4950372208436724, \"Memory in Mb\": 1.0347824096679688, \"Time in s\": 1.562947 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9840490797546012, \"MicroF1\": 0.9840490797546012, \"MacroF1\": 0.9559273479637392, \"Memory in Mb\": 2.137660026550293, \"Time in s\": 5.08284 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.983646770237122, \"MicroF1\": 0.983646770237122, \"MacroF1\": 0.9660207101584454, \"Memory in Mb\": 3.2939910888671875, \"Time in s\": 10.794145 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9803801348865726, \"MicroF1\": 0.9803801348865726, \"MacroF1\": 0.9452685517164728, \"Memory in Mb\": 4.760180473327637, \"Time in s\": 18.719227 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.973516429622364, \"MicroF1\": 0.973516429622364, \"MacroF1\": 0.9361195161551138, \"Memory in Mb\": 6.6425981521606445, \"Time in s\": 28.938428 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.973028197793216, \"MicroF1\": 0.973028197793216, \"MacroF1\": 0.9615988180290456, \"Memory in Mb\": 5.552071571350098, \"Time in s\": 41.439403 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9747810858143608, \"MicroF1\": 0.9747810858143608, \"MacroF1\": 0.9713591464752812, \"Memory in Mb\": 6.8436784744262695, \"Time in s\": 56.378536 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.974869751762182, \"MicroF1\": 0.974869751762182, \"MacroF1\": 0.9692034094625394, \"Memory in Mb\": 6.567030906677246, \"Time in s\": 73.934123 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", 
\"Accuracy\": 0.9743938981204032, \"MicroF1\": 0.9743938981204032, \"MacroF1\": 0.9689232613591288, \"Memory in Mb\": 8.48116397857666, \"Time in s\": 94.245901 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9722971316499142, \"MicroF1\": 0.9722971316499142, \"MacroF1\": 0.96426610548244, \"Memory in Mb\": 5.934853553771973, \"Time in s\": 117.581237 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9732560731000668, \"MicroF1\": 0.9732560731000668, \"MacroF1\": 0.9722719909296184, \"Memory in Mb\": 3.644045829772949, \"Time in s\": 143.695153 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748723186925434, \"MicroF1\": 0.9748723186925434, \"MacroF1\": 0.9754037061196345, \"Memory in Mb\": 4.787886619567871, \"Time in s\": 172.738198 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9741655666603808, \"MicroF1\": 0.9741655666603808, \"MacroF1\": 0.9716360242916738, \"Memory in Mb\": 5.742301940917969, \"Time in s\": 204.85114700000003 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9746104009805638, \"MicroF1\": 0.9746104009805638, \"MacroF1\": 0.9740216295290516, \"Memory in Mb\": 6.782421112060547, \"Time in s\": 240.11050800000004 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9738519365909464, \"MicroF1\": 0.9738519365909464, \"MacroF1\": 0.9722333406974256, \"Memory in Mb\": 8.176769256591797, \"Time in s\": 278.69027800000003 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9742607629845258, \"MicroF1\": 0.9742607629845258, \"MacroF1\": 0.9741504405159308, \"Memory in Mb\": 4.433716773986816, \"Time in s\": 320.74209900000005 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751982696467196, \"MicroF1\": 0.9751982696467196, \"MacroF1\": 0.9755523782693606, \"Memory in Mb\": 5.133135795593262, \"Time in s\": 366.1265460000001 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9757592264741932, \"MicroF1\": 0.9757592264741932, \"MacroF1\": 0.9758485662267348, \"Memory in Mb\": 5.760107040405273, \"Time in s\": 415.04014400000005 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760030963746612, \"MicroF1\": 0.9760030963746612, \"MacroF1\": 0.9758957983961688, \"Memory in Mb\": 6.842521667480469, \"Time in s\": 467.5877 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9758548841769824, \"MicroF1\": 0.9758548841769824, \"MacroF1\": 0.9755087152005796, \"Memory in Mb\": 8.403876304626465, \"Time in s\": 523.9395460000001 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751371541963348, \"MicroF1\": 0.9751371541963348, \"MacroF1\": 0.9744422302091884, \"Memory in Mb\": 8.9804105758667, \"Time in s\": 
584.299164 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.975598885793872, \"MicroF1\": 0.975598885793872, \"MacroF1\": 0.9757626053423432, \"Memory in Mb\": 8.348807334899902, \"Time in s\": 648.764254 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97527443248428, \"MicroF1\": 0.97527443248428, \"MacroF1\": 0.9749874884381716, \"Memory in Mb\": 8.780474662780762, \"Time in s\": 717.435188 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9751812889388214, \"MicroF1\": 0.9751812889388214, \"MacroF1\": 0.9751287694103772, \"Memory in Mb\": 7.27089786529541, \"Time in s\": 790.366072 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748994999509756, \"MicroF1\": 0.9748994999509756, \"MacroF1\": 0.9747198913701116, \"Memory in Mb\": 8.182950019836426, \"Time in s\": 867.658962 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9744508343546714, \"MicroF1\": 0.9744508343546714, \"MacroF1\": 0.9742218409220016, \"Memory in Mb\": 8.212386131286621, \"Time in s\": 949.401841 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9747616886064456, \"MicroF1\": 0.9747616886064456, \"MacroF1\": 0.9748981365239816, \"Memory in Mb\": 7.736974716186523, \"Time in s\": 1035.676852 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9748752516851964, \"MicroF1\": 0.9748752516851964, \"MacroF1\": 0.9749367981815978, \"Memory in Mb\": 8.583486557006836, \"Time in s\": 1126.650079 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9749809821654974, \"MicroF1\": 0.9749809821654974, \"MacroF1\": 0.9750463661723392, \"Memory in Mb\": 8.887914657592773, \"Time in s\": 1222.478726 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9755698995015932, \"MicroF1\": 0.9755698995015932, \"MacroF1\": 0.9757989853757532, \"Memory in Mb\": 9.402573585510254, \"Time in s\": 1323.224256 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760417490313908, \"MicroF1\": 0.9760417490313908, \"MacroF1\": 0.9762258400907322, \"Memory in Mb\": 9.833843231201172, \"Time in s\": 1429.020384 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760245116813482, \"MicroF1\": 0.9760245116813482, \"MacroF1\": 0.9760626338918788, \"Memory in Mb\": 9.939188957214355, \"Time in s\": 1539.962955 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762311520463492, \"MicroF1\": 0.9762311520463492, \"MacroF1\": 0.976330045562598, \"Memory in Mb\": 10.46349811553955, \"Time in s\": 1656.191893 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763535433638526, \"MicroF1\": 0.9763535433638526, 
\"MacroF1\": 0.9764287224231292, \"Memory in Mb\": 11.428705215454102, \"Time in s\": 1777.90617 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9758386441627565, \"MicroF1\": 0.9758386441627565, \"MacroF1\": 0.9757700210755772, \"Memory in Mb\": 10.687178611755373, \"Time in s\": 1905.20097 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.976101314087288, \"MicroF1\": 0.976101314087288, \"MacroF1\": 0.9761996431080104, \"Memory in Mb\": 10.750173568725586, \"Time in s\": 2038.11614 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9764822789002982, \"MicroF1\": 0.9764822789002982, \"MacroF1\": 0.9765941858257003, \"Memory in Mb\": 10.87511920928955, \"Time in s\": 2176.860193 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762626588402245, \"MicroF1\": 0.9762626588402245, \"MacroF1\": 0.9762697293829714, \"Memory in Mb\": 11.144217491149902, \"Time in s\": 2321.518715 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.976305700458802, \"MicroF1\": 0.976305700458802, \"MacroF1\": 0.9763523962033862, \"Memory in Mb\": 10.881969451904297, \"Time in s\": 2472.196158 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9761627550707764, \"MicroF1\": 0.9761627550707764, \"MacroF1\": 0.9761821898526978, \"Memory in Mb\": 10.72462272644043, \"Time in s\": 2629.0181850000004 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9760267830453756, \"MicroF1\": 0.9760267830453756, \"MacroF1\": 0.9760462981867312, \"Memory in Mb\": 10.397873878479004, \"Time in s\": 2792.0314960000005 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763641669098336, \"MicroF1\": 0.9763641669098336, \"MacroF1\": 0.976427628373518, \"Memory in Mb\": 11.5784912109375, \"Time in s\": 2961.4220420000006 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9762868380550648, \"MicroF1\": 0.9762868380550648, \"MacroF1\": 0.9763077393136288, \"Memory in Mb\": 10.780138969421388, \"Time in s\": 3137.2839260000005 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9763801459528716, \"MicroF1\": 0.9763801459528716, \"MacroF1\": 0.9764101118400772, \"Memory in Mb\": 11.24526309967041, \"Time in s\": 3319.622350000001 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.97663271420012, \"MicroF1\": 0.97663271420012, \"MacroF1\": 0.976666198082788, \"Memory in Mb\": 11.450252532958984, \"Time in s\": 3508.4984120000004 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9769808706772526, \"MicroF1\": 0.9769808706772526, \"MacroF1\": 0.9770112706505792, \"Memory in Mb\": 12.824438095092772, \"Time in s\": 3704.110850000001 }, { \"step\": 19176, \"track\": 
\"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9767926988265972, \"MicroF1\": 0.9767926988265972, \"MacroF1\": 0.976797459665624, \"Memory in Mb\": 13.463789939880373, \"Time in s\": 3906.654607 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9766123678700914, \"MicroF1\": 0.9766123678700914, \"MacroF1\": 0.97661532368473, \"Memory in Mb\": 12.60595417022705, \"Time in s\": 4116.138220000001 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9765894652593669, \"MicroF1\": 0.9765894652593669, \"MacroF1\": 0.976591825772772, \"Memory in Mb\": 11.445868492126465, \"Time in s\": 4332.655771000001 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Stacking\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9765184567870974, \"MicroF1\": 0.9765184567870974, \"MacroF1\": 0.9765167109502484, \"Memory in Mb\": 12.220311164855955, \"Time in s\": 4556.333966000001 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.4888888888888889, \"MicroF1\": 0.4888888888888889, \"MacroF1\": 0.4138888888888889, \"Memory in Mb\": 0.8855724334716797, \"Time in s\": 0.380739 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6263736263736264, \"MicroF1\": 0.6263736263736264, \"MacroF1\": 0.6295417331131617, \"Memory in Mb\": 0.9400959014892578, \"Time in s\": 0.906366 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.6788321167883211, \"MicroF1\": 0.6788321167883211, \"MacroF1\": 0.6955125455614023, \"Memory in Mb\": 0.9512205123901368, \"Time in s\": 1.596335 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7158469945355191, \"MicroF1\": 0.7158469945355191, \"MacroF1\": 0.7293605295181818, \"Memory in Mb\": 0.9506902694702148, \"Time in s\": 2.451892 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.74235807860262, \"MicroF1\": 0.74235807860262, \"MacroF1\": 0.7560849066334576, \"Memory in Mb\": 0.9507265090942384, \"Time in s\": 3.478975 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7490909090909091, \"MicroF1\": 0.7490909090909091, \"MacroF1\": 0.7654899494294127, \"Memory in Mb\": 0.9522123336791992, \"Time in s\": 4.6524790000000005 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7632398753894081, \"MicroF1\": 0.7632398753894081, \"MacroF1\": 0.7699967547900484, \"Memory in Mb\": 0.9522132873535156, \"Time in s\": 5.915859 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.782016348773842, \"MicroF1\": 0.782016348773842, \"MacroF1\": 0.7847454642968661, \"Memory in Mb\": 0.9517135620117188, \"Time in s\": 7.268692 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7869249394673123, \"MicroF1\": 0.7869249394673122, \"MacroF1\": 
0.7891209865588749, \"Memory in Mb\": 0.952162742614746, \"Time in s\": 8.714221 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7821350762527233, \"MicroF1\": 0.7821350762527233, \"MacroF1\": 0.7829889615631377, \"Memory in Mb\": 0.9522056579589844, \"Time in s\": 10.249346 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7861386138613862, \"MicroF1\": 0.7861386138613862, \"MacroF1\": 0.7872755051739567, \"Memory in Mb\": 0.9517154693603516, \"Time in s\": 11.874447 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7858439201451906, \"MicroF1\": 0.7858439201451906, \"MacroF1\": 0.7876565639439724, \"Memory in Mb\": 0.9515762329101562, \"Time in s\": 13.588949 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7872696817420436, \"MicroF1\": 0.7872696817420435, \"MacroF1\": 0.7897468061485311, \"Memory in Mb\": 0.9521427154541016, \"Time in s\": 15.393404 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7822706065318819, \"MicroF1\": 0.7822706065318819, \"MacroF1\": 0.7858452362125997, \"Memory in Mb\": 0.9521217346191406, \"Time in s\": 17.2908 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7851959361393324, \"MicroF1\": 0.7851959361393324, \"MacroF1\": 0.788215888108031, \"Memory in Mb\": 0.9515953063964844, \"Time in s\": 19.280843 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7836734693877551, \"MicroF1\": 0.783673469387755, \"MacroF1\": 0.7873581098337732, \"Memory in Mb\": 0.9521245956420898, \"Time in s\": 21.362069 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7861715749039693, \"MicroF1\": 0.7861715749039692, \"MacroF1\": 0.7892834149474556, \"Memory in Mb\": 0.9521360397338868, \"Time in s\": 23.534270000000003 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7847642079806529, \"MicroF1\": 0.7847642079806529, \"MacroF1\": 0.7891292080670234, \"Memory in Mb\": 0.951629638671875, \"Time in s\": 25.799776 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7892325315005727, \"MicroF1\": 0.7892325315005727, \"MacroF1\": 0.7922023317831084, \"Memory in Mb\": 0.9516172409057616, \"Time in s\": 28.155901 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7889009793253536, \"MicroF1\": 0.7889009793253536, \"MacroF1\": 0.7905862723276574, \"Memory in Mb\": 0.9520702362060548, \"Time in s\": 30.602138 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.78860103626943, \"MicroF1\": 0.78860103626943, \"MacroF1\": 0.7894031693051725, \"Memory in Mb\": 0.952082633972168, \"Time in s\": 33.138731 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": 
\"ImageSegments\", \"Accuracy\": 0.7873392680514342, \"MicroF1\": 0.7873392680514342, \"MacroF1\": 0.7878835011583499, \"Memory in Mb\": 0.9515609741210938, \"Time in s\": 35.768379 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7899716177861873, \"MicroF1\": 0.7899716177861873, \"MacroF1\": 0.7897146415510686, \"Memory in Mb\": 0.9520339965820312, \"Time in s\": 38.488246 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7905711695376246, \"MicroF1\": 0.7905711695376246, \"MacroF1\": 0.7902707663283154, \"Memory in Mb\": 0.9521427154541016, \"Time in s\": 41.298010000000005 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7919930374238469, \"MicroF1\": 0.7919930374238469, \"MacroF1\": 0.7910217164829003, \"Memory in Mb\": 0.9516496658325196, \"Time in s\": 44.198117 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.793305439330544, \"MicroF1\": 0.793305439330544, \"MacroF1\": 0.7926565595792737, \"Memory in Mb\": 0.9516582489013672, \"Time in s\": 47.188338 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7921031426269137, \"MicroF1\": 0.7921031426269137, \"MacroF1\": 0.791644431462719, \"Memory in Mb\": 0.9522132873535156, \"Time in s\": 50.271512 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7964257964257965, \"MicroF1\": 0.7964257964257965, \"MacroF1\": 0.7949172523959339, \"Memory in Mb\": 0.952223777770996, \"Time in s\": 53.444669000000005 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.795198799699925, \"MicroF1\": 0.7951987996999249, \"MacroF1\": 0.7938516970082157, \"Memory in Mb\": 0.9516925811767578, \"Time in s\": 56.707980000000006 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7955039883973894, \"MicroF1\": 0.7955039883973894, \"MacroF1\": 0.794312731896104, \"Memory in Mb\": 0.9516897201538086, \"Time in s\": 60.06121 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.7971929824561403, \"MicroF1\": 0.7971929824561403, \"MacroF1\": 0.7952130436298935, \"Memory in Mb\": 0.9521360397338868, \"Time in s\": 63.507281000000006 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8008157715839564, \"MicroF1\": 0.8008157715839563, \"MacroF1\": 0.7971305683653547, \"Memory in Mb\": 0.9521236419677734, \"Time in s\": 67.043451 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8015820698747528, \"MicroF1\": 0.8015820698747528, \"MacroF1\": 0.7969787037511136, \"Memory in Mb\": 0.95166015625, \"Time in s\": 70.670162 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8016634676903391, \"MicroF1\": 0.8016634676903392, \"MacroF1\": 0.7975983332578384, \"Memory in Mb\": 0.9521465301513672, 
\"Time in s\": 74.387065 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8017402113113735, \"MicroF1\": 0.8017402113113735, \"MacroF1\": 0.7969541458804642, \"Memory in Mb\": 0.9521703720092772, \"Time in s\": 78.19725000000001 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8018126888217523, \"MicroF1\": 0.8018126888217523, \"MacroF1\": 0.7970318311622571, \"Memory in Mb\": 0.9516267776489258, \"Time in s\": 82.09764400000002 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8018812463256908, \"MicroF1\": 0.8018812463256908, \"MacroF1\": 0.7992301124377234, \"Memory in Mb\": 0.9516735076904296, \"Time in s\": 86.09102400000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8036634230108758, \"MicroF1\": 0.8036634230108759, \"MacroF1\": 0.8004815801809151, \"Memory in Mb\": 0.952157974243164, \"Time in s\": 90.17551900000002 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8042387060791969, \"MicroF1\": 0.8042387060791969, \"MacroF1\": 0.799787639242423, \"Memory in Mb\": 0.9521493911743164, \"Time in s\": 94.35067000000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8053289831430125, \"MicroF1\": 0.8053289831430125, \"MacroF1\": 0.8009597766649573, \"Memory in Mb\": 0.9516563415527344, \"Time in s\": 98.61893200000004 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.803183023872679, \"MicroF1\": 0.8031830238726789, \"MacroF1\": 0.799227837217116, \"Memory in Mb\": 0.9521894454956056, \"Time in s\": 102.97762100000004 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8032107716209218, \"MicroF1\": 0.8032107716209218, \"MacroF1\": 0.7985344176802335, \"Memory in Mb\": 0.9521827697753906, \"Time in s\": 107.42655600000003 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8042488619119879, \"MicroF1\": 0.8042488619119877, \"MacroF1\": 0.7992002826592023, \"Memory in Mb\": 0.9516563415527344, \"Time in s\": 111.96600400000004 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8057340583292141, \"MicroF1\": 0.8057340583292142, \"MacroF1\": 0.799488243695578, \"Memory in Mb\": 0.9516725540161132, \"Time in s\": 116.59842900000002 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.80521991300145, \"MicroF1\": 0.80521991300145, \"MacroF1\": 0.7990099218703556, \"Memory in Mb\": 0.9521942138671876, \"Time in s\": 121.31866500000002 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8056737588652483, \"MicroF1\": 0.8056737588652483, \"MacroF1\": 0.798658845250099, \"Memory in Mb\": 0.9521694183349608, \"Time in s\": 126.12612700000004 }, { \"step\": 2162, \"track\": \"Multiclass classification\", 
\"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8061082832022212, \"MicroF1\": 0.8061082832022212, \"MacroF1\": 0.7986518526284686, \"Memory in Mb\": 0.9516706466674804, \"Time in s\": 131.02082400000003 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8078840054372451, \"MicroF1\": 0.8078840054372451, \"MacroF1\": 0.7995103660963299, \"Memory in Mb\": 0.9521732330322266, \"Time in s\": 136.00538900000004 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8047048379937861, \"MicroF1\": 0.8047048379937861, \"MacroF1\": 0.7963417515999387, \"Memory in Mb\": 0.9521608352661132, \"Time in s\": 141.07727400000005 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.8033927794693345, \"MicroF1\": 0.8033927794693345, \"MacroF1\": 0.7949752803158223, \"Memory in Mb\": 0.9516582489013672, \"Time in s\": 146.23634000000004 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6293838862559241, \"MicroF1\": 0.6293838862559241, \"MacroF1\": 0.5939725193500994, \"Memory in Mb\": 1.5110340118408203, \"Time in s\": 3.200552 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.62482235907153, \"MicroF1\": 0.62482235907153, \"MacroF1\": 0.5894737350922559, \"Memory in Mb\": 1.5110177993774414, \"Time in s\": 9.230037 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6198294916324597, \"MicroF1\": 0.6198294916324597, \"MacroF1\": 0.5838888884930272, \"Memory in Mb\": 1.5110721588134766, \"Time in s\": 17.854201 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6192280369405636, \"MicroF1\": 0.6192280369405636, \"MacroF1\": 0.5835519631382228, \"Memory in Mb\": 1.5110435485839844, \"Time in s\": 28.950406 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6256866830839174, \"MicroF1\": 0.6256866830839174, \"MacroF1\": 0.5887468172490868, \"Memory in Mb\": 1.511063575744629, \"Time in s\": 42.489771000000005 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6187845303867403, \"MicroF1\": 0.6187845303867403, \"MacroF1\": 0.5833486573822239, \"Memory in Mb\": 1.5110454559326172, \"Time in s\": 58.469284 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6180489784873495, \"MicroF1\": 0.6180489784873495, \"MacroF1\": 0.5826198728106428, \"Memory in Mb\": 1.5110721588134766, \"Time in s\": 76.881409 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.619746655617379, \"MicroF1\": 0.619746655617379, \"MacroF1\": 0.5840081546383048, \"Memory in Mb\": 1.5110502243041992, \"Time in s\": 97.728436 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6190676628433126, \"MicroF1\": 0.6190676628433126, \"MacroF1\": 0.5828637425505069, \"Memory in Mb\": 1.511042594909668, \"Time 
in s\": 121.006042 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6198503646178616, \"MicroF1\": 0.6198503646178616, \"MacroF1\": 0.5836946750940745, \"Memory in Mb\": 1.5110759735107422, \"Time in s\": 146.711441 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6175634954799828, \"MicroF1\": 0.6175634954799828, \"MacroF1\": 0.5822534545682404, \"Memory in Mb\": 1.511033058166504, \"Time in s\": 174.85184600000002 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6204719438086971, \"MicroF1\": 0.6204719438086971, \"MacroF1\": 0.5879866433279776, \"Memory in Mb\": 1.5111761093139648, \"Time in s\": 205.425851 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6369199388067313, \"MicroF1\": 0.6369199388067313, \"MacroF1\": 0.618745437324273, \"Memory in Mb\": 1.5112380981445312, \"Time in s\": 238.425571 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.630386254481499, \"MicroF1\": 0.630386254481499, \"MacroF1\": 0.6115259179282228, \"Memory in Mb\": 1.5110998153686523, \"Time in s\": 273.85181700000004 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5992171222930741, \"MicroF1\": 0.5992171222930741, \"MacroF1\": 0.581747071745844, \"Memory in Mb\": 1.5110797882080078, \"Time in s\": 311.71395800000005 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5783959751405742, \"MicroF1\": 0.5783959751405742, \"MacroF1\": 0.5619501594422388, \"Memory in Mb\": 1.511063575744629, \"Time in s\": 352.00364300000007 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5631998217369506, \"MicroF1\": 0.5631998217369506, \"MacroF1\": 0.5464708450044057, \"Memory in Mb\": 1.511117935180664, \"Time in s\": 394.7217830000001 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.565528489503867, \"MicroF1\": 0.565528489503867, \"MacroF1\": 0.5447789723081985, \"Memory in Mb\": 1.5110950469970703, \"Time in s\": 439.87157300000007 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5725464785924338, \"MicroF1\": 0.5725464785924338, \"MacroF1\": 0.5493312346450109, \"Memory in Mb\": 2.16702938079834, \"Time in s\": 487.436087 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5819404327856432, \"MicroF1\": 0.5819404327856432, \"MacroF1\": 0.5575973426297249, \"Memory in Mb\": 2.167789459228516, \"Time in s\": 537.344767 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5905298759864712, \"MicroF1\": 0.5905298759864712, \"MacroF1\": 0.5648531785235197, \"Memory in Mb\": 2.167774200439453, \"Time in s\": 589.565521 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.5995867590719297, \"MicroF1\": 
0.5995867590719297, \"MacroF1\": 0.5728007753824246, \"Memory in Mb\": 2.167778968811035, \"Time in s\": 644.107989 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6068678717009099, \"MicroF1\": 0.6068678717009099, \"MacroF1\": 0.578555560305262, \"Memory in Mb\": 2.16780948638916, \"Time in s\": 700.966552 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6143313735548278, \"MicroF1\": 0.6143313735548278, \"MacroF1\": 0.5848116898462843, \"Memory in Mb\": 2.167755126953125, \"Time in s\": 760.153761 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.621084131974696, \"MicroF1\": 0.621084131974696, \"MacroF1\": 0.5900605973096019, \"Memory in Mb\": 2.1677980422973637, \"Time in s\": 821.662998 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6266618102349298, \"MicroF1\": 0.6266618102349298, \"MacroF1\": 0.5936647802901621, \"Memory in Mb\": 2.167790412902832, \"Time in s\": 885.506266 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6295114166462067, \"MicroF1\": 0.6295114166462067, \"MacroF1\": 0.5991480792709615, \"Memory in Mb\": 2.168045997619629, \"Time in s\": 951.65113 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6294517536442655, \"MicroF1\": 0.6294517536442655, \"MacroF1\": 0.6037001563215106, \"Memory in Mb\": 2.1680641174316406, \"Time in s\": 1020.249901 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6287104463964993, \"MicroF1\": 0.6287104463964993, \"MacroF1\": 0.6068237930795873, \"Memory in Mb\": 2.168071746826172, \"Time in s\": 1091.329507 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6292496606584804, \"MicroF1\": 0.6292496606584804, \"MacroF1\": 0.6106666463743293, \"Memory in Mb\": 2.1680679321289062, \"Time in s\": 1164.886578 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6302734076676341, \"MicroF1\": 0.6302734076676341, \"MacroF1\": 0.614251388937007, \"Memory in Mb\": 2.168027877807617, \"Time in s\": 1240.9182979999998 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6266165547039152, \"MicroF1\": 0.6266165547039152, \"MacroF1\": 0.6112639299818544, \"Memory in Mb\": 2.1678638458251958, \"Time in s\": 1319.4287269999998 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6216604011823113, \"MicroF1\": 0.6216604011823113, \"MacroF1\": 0.6060150865308916, \"Memory in Mb\": 2.1678390502929688, \"Time in s\": 1400.4187529999997 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6181377600757597, \"MicroF1\": 0.6181377600757597, \"MacroF1\": 0.6018714875673907, \"Memory in Mb\": 2.167888641357422, \"Time in s\": 1483.8935999999997 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"Voting\", 
\"dataset\": \"Insects\", \"Accuracy\": 0.6138153088557591, \"MicroF1\": 0.6138153088557591, \"MacroF1\": 0.5971057932031453, \"Memory in Mb\": 2.167864799499512, \"Time in s\": 1569.8367669999996 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6116796001578324, \"MicroF1\": 0.6116796001578324, \"MacroF1\": 0.5945381289951768, \"Memory in Mb\": 2.709075927734375, \"Time in s\": 1658.2515679999997 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6122187811932124, \"MicroF1\": 0.6122187811932124, \"MacroF1\": 0.5950787740952911, \"Memory in Mb\": 2.823759078979492, \"Time in s\": 1749.1539719999994 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6125052956861963, \"MicroF1\": 0.6125052956861963, \"MacroF1\": 0.5964110573184415, \"Memory in Mb\": 2.823785781860352, \"Time in s\": 1842.4635979999996 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6145254109705461, \"MicroF1\": 0.6145254109705461, \"MacroF1\": 0.5992770713855892, \"Memory in Mb\": 2.82379150390625, \"Time in s\": 1938.0560169999997 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6163024692819432, \"MicroF1\": 0.6163024692819432, \"MacroF1\": 0.601670854132613, \"Memory in Mb\": 2.823772430419922, \"Time in s\": 2035.9261369999997 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6181776186626631, \"MicroF1\": 0.6181776186626631, \"MacroF1\": 0.6041281005310094, \"Memory in Mb\": 2.8237924575805664, \"Time in s\": 2136.072454 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6197605465491195, \"MicroF1\": 0.6197605465491195, \"MacroF1\": 0.6062005996937425, \"Memory in Mb\": 2.824528694152832, \"Time in s\": 2238.51149 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6214019864778558, \"MicroF1\": 0.6214019864778558, \"MacroF1\": 0.607792464273323, \"Memory in Mb\": 2.824528694152832, \"Time in s\": 2343.241606 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6233992639304393, \"MicroF1\": 0.6233992639304393, \"MacroF1\": 0.6097993182820672, \"Memory in Mb\": 2.824531555175781, \"Time in s\": 2450.26566 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6260864075422463, \"MicroF1\": 0.6260864075422463, \"MacroF1\": 0.6129939002712749, \"Memory in Mb\": 2.8244552612304688, \"Time in s\": 2559.63137 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6300154400411735, \"MicroF1\": 0.6300154400411735, \"MacroF1\": 0.6173873766747581, \"Memory in Mb\": 2.824479103088379, \"Time in s\": 2671.379304 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6343011424311418, \"MicroF1\": 0.6343011424311418, \"MacroF1\": 0.621931196280001, \"Memory in Mb\": 2.8244781494140625, \"Time in s\": 2785.498386 }, 
{ \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.638506914988064, \"MicroF1\": 0.638506914988064, \"MacroF1\": 0.6263145143911814, \"Memory in Mb\": 2.947113037109375, \"Time in s\": 2902.0058780000004 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6434686817540537, \"MicroF1\": 0.6434686817540537, \"MacroF1\": 0.6313977027921706, \"Memory in Mb\": 3.184900283813477, \"Time in s\": 3020.8771950000005 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Insects\", \"Accuracy\": 0.6479289380480691, \"MicroF1\": 0.6479289380480691, \"MacroF1\": 0.635943324049664, \"Memory in Mb\": 3.3886165618896484, \"Time in s\": 3141.9869480000007 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9828009828009828, \"MicroF1\": 0.9828009828009828, \"MacroF1\": 0.6067632850241546, \"Memory in Mb\": 0.6423864364624023, \"Time in s\": 0.703596 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9546012269938652, \"MicroF1\": 0.9546012269938652, \"MacroF1\": 0.7993954329623859, \"Memory in Mb\": 0.8351936340332031, \"Time in s\": 2.165771 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9206868356500408, \"MicroF1\": 0.9206868356500408, \"MacroF1\": 0.9055597826779512, \"Memory in Mb\": 1.029007911682129, \"Time in s\": 4.467328 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9307173513182097, \"MicroF1\": 0.9307173513182097, \"MacroF1\": 0.917259757091744, \"Memory in Mb\": 1.2232952117919922, \"Time in s\": 7.6892 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9303580186365864, \"MicroF1\": 0.9303580186365864, \"MacroF1\": 0.919916287137026, \"Memory in Mb\": 1.428065299987793, \"Time in s\": 11.917356 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9060073559460564, \"MicroF1\": 0.9060073559460564, \"MacroF1\": 0.9093956340782632, \"Memory in Mb\": 1.6218795776367188, \"Time in s\": 17.211204 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9103327495621716, \"MicroF1\": 0.9103327495621716, \"MacroF1\": 0.8980697688452707, \"Memory in Mb\": 1.8146867752075195, \"Time in s\": 23.617224 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.904382470119522, \"MicroF1\": 0.904382470119522, \"MacroF1\": 0.888202704220525, \"Memory in Mb\": 2.0085010528564453, \"Time in s\": 31.150342 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8994824298556252, \"MicroF1\": 0.8994824298556252, \"MacroF1\": 0.8972334256598172, \"Memory in Mb\": 2.2018117904663086, \"Time in s\": 39.868796 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8945820053934788, \"MicroF1\": 0.8945820053934787, \"MacroF1\": 0.8851783489415491, \"Memory in 
Mb\": 2.420787811279297, \"Time in s\": 49.793690000000005 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8914642299977713, \"MicroF1\": 0.8914642299977713, \"MacroF1\": 0.898372373723482, \"Memory in Mb\": 2.6146020889282227, \"Time in s\": 61.011475 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8880490296220633, \"MicroF1\": 0.8880490296220633, \"MacroF1\": 0.8932697641963906, \"Memory in Mb\": 2.807912826538086, \"Time in s\": 73.572163 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.883085046200264, \"MicroF1\": 0.883085046200264, \"MacroF1\": 0.8680917053752625, \"Memory in Mb\": 3.000770568847656, \"Time in s\": 87.52603 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8746279110488531, \"MicroF1\": 0.8746279110488531, \"MacroF1\": 0.8792177397015432, \"Memory in Mb\": 3.194584846496582, \"Time in s\": 102.922365 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8695865337473443, \"MicroF1\": 0.8695865337473442, \"MacroF1\": 0.8546904737358852, \"Memory in Mb\": 3.387392044067383, \"Time in s\": 119.819987 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8579745671824728, \"MicroF1\": 0.8579745671824728, \"MacroF1\": 0.858067415232278, \"Memory in Mb\": 3.5812063217163086, \"Time in s\": 138.28125400000002 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8537851478010093, \"MicroF1\": 0.8537851478010093, \"MacroF1\": 0.8590096923865055, \"Memory in Mb\": 3.774517059326172, \"Time in s\": 158.37518500000002 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8594579871986926, \"MicroF1\": 0.8594579871986926, \"MacroF1\": 0.8620220702364139, \"Memory in Mb\": 3.9702539443969727, \"Time in s\": 180.164436 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8593729841310799, \"MicroF1\": 0.8593729841310799, \"MacroF1\": 0.8617576440335053, \"Memory in Mb\": 4.164068222045898, \"Time in s\": 203.71390800000003 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8601544306900355, \"MicroF1\": 0.8601544306900355, \"MacroF1\": 0.8605355806611993, \"Memory in Mb\": 4.357378959655762, \"Time in s\": 229.09093700000005 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8596941753239173, \"MicroF1\": 0.8596941753239173, \"MacroF1\": 0.8627767842417701, \"Memory in Mb\": 4.60028076171875, \"Time in s\": 256.36638600000003 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8599442896935933, \"MicroF1\": 0.8599442896935933, \"MacroF1\": 0.8629838037923419, \"Memory in Mb\": 4.794095039367676, \"Time in s\": 285.60935500000005 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.8581477139507621, \"MicroF1\": 0.8581477139507621, \"MacroF1\": 0.8592031021693959, \"Memory in Mb\": 4.986902236938477, \"Time in s\": 316.88668100000007 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8539475028087019, \"MicroF1\": 0.8539475028087019, \"MacroF1\": 0.8546213426549989, \"Memory in Mb\": 5.180716514587402, \"Time in s\": 350.2631590000001 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8465535836846749, \"MicroF1\": 0.8465535836846749, \"MacroF1\": 0.8431270001478435, \"Memory in Mb\": 5.374000549316406, \"Time in s\": 385.8123490000001 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8300179126991609, \"MicroF1\": 0.8300179126991609, \"MacroF1\": 0.8240754775818138, \"Memory in Mb\": 5.566834449768066, \"Time in s\": 423.5940000000001 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8254198819791194, \"MicroF1\": 0.8254198819791194, \"MacroF1\": 0.8271925616445298, \"Memory in Mb\": 5.760648727416992, \"Time in s\": 463.6719130000001 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.820449969360063, \"MicroF1\": 0.820449969360063, \"MacroF1\": 0.8166393841205931, \"Memory in Mb\": 5.953959465026856, \"Time in s\": 506.1140620000001 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8169216465218494, \"MicroF1\": 0.8169216465218494, \"MacroF1\": 0.8172029683603622, \"Memory in Mb\": 6.146766662597656, \"Time in s\": 550.9879090000001 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8200016341204347, \"MicroF1\": 0.8200016341204347, \"MacroF1\": 0.8225884010623591, \"Memory in Mb\": 6.340580940246582, \"Time in s\": 598.362302 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8167154265833795, \"MicroF1\": 0.8167154265833795, \"MacroF1\": 0.8162987105601626, \"Memory in Mb\": 6.533388137817383, \"Time in s\": 648.30112 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8121792416698583, \"MicroF1\": 0.8121792416698584, \"MacroF1\": 0.8136075732214813, \"Memory in Mb\": 6.727202415466309, \"Time in s\": 700.874955 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8099234940206492, \"MicroF1\": 0.8099234940206492, \"MacroF1\": 0.8122480630127521, \"Memory in Mb\": 6.920539855957031, \"Time in s\": 756.151971 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.810539975488429, \"MicroF1\": 0.810539975488429, \"MacroF1\": 0.8134726777385565, \"Memory in Mb\": 7.113347053527832, \"Time in s\": 814.194174 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8103508649065061, \"MicroF1\": 0.810350864906506, \"MacroF1\": 0.8130549704062812, \"Memory in Mb\": 7.307161331176758, \"Time in s\": 875.070018 }, { \"step\": 14688, 
\"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8133042826989855, \"MicroF1\": 0.8133042826989855, \"MacroF1\": 0.8168484225511677, \"Memory in Mb\": 7.500472068786621, \"Time in s\": 938.856287 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8174229877442862, \"MicroF1\": 0.8174229877442862, \"MacroF1\": 0.8208616131428813, \"Memory in Mb\": 7.693279266357422, \"Time in s\": 1005.608239 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8175191898342257, \"MicroF1\": 0.8175191898342257, \"MacroF1\": 0.8200404227627133, \"Memory in Mb\": 7.887093544006348, \"Time in s\": 1075.3914499999998 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8100685060649865, \"MicroF1\": 0.8100685060649865, \"MacroF1\": 0.8105704783549956, \"Memory in Mb\": 8.079900741577148, \"Time in s\": 1148.2720969999998 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8058704577486365, \"MicroF1\": 0.8058704577486365, \"MacroF1\": 0.8082920647955453, \"Memory in Mb\": 8.273715019226074, \"Time in s\": 1224.3182599999998 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.8029533090213428, \"MicroF1\": 0.8029533090213428, \"MacroF1\": 0.8061756731743527, \"Memory in Mb\": 8.467025756835938, \"Time in s\": 1303.603477 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7992996790195507, \"MicroF1\": 0.7992996790195507, \"MacroF1\": 0.8021910628966759, \"Memory in Mb\": 8.7622652053833, \"Time in s\": 1386.189622 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7934218776720059, \"MicroF1\": 0.7934218776720059, \"MacroF1\": 0.7969041071406875, \"Memory in Mb\": 8.956079483032227, \"Time in s\": 1472.1458429999998 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7934933986964514, \"MicroF1\": 0.7934933986964514, \"MacroF1\": 0.7978100866424277, \"Memory in Mb\": 9.14939022064209, \"Time in s\": 1561.5342959999998 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7969933002886868, \"MicroF1\": 0.7969933002886866, \"MacroF1\": 0.8014382450066739, \"Memory in Mb\": 9.34219741821289, \"Time in s\": 1654.4248979999998 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7999147439654714, \"MicroF1\": 0.7999147439654714, \"MacroF1\": 0.8043799341405246, \"Memory in Mb\": 9.536011695861816, \"Time in s\": 1750.8812689999995 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7945241199478488, \"MicroF1\": 0.7945241199478488, \"MacroF1\": 0.7987282715896407, \"Memory in Mb\": 9.728818893432615, \"Time in s\": 1850.973451 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.797375274472757, \"MicroF1\": 0.797375274472757, 
\"MacroF1\": 0.8021140041360401, \"Memory in Mb\": 9.922633171081545, \"Time in s\": 1954.769568 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.7945075283877745, \"MicroF1\": 0.7945075283877745, \"MacroF1\": 0.7995475233856788, \"Memory in Mb\": 10.115943908691406, \"Time in s\": 2062.333925 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"Voting\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.793274180106868, \"MicroF1\": 0.793274180106868, \"MacroF1\": 0.7984237858213096, \"Memory in Mb\": 10.308751106262209, \"Time in s\": 2173.749777 }, { \"step\": 46, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1777777777777777, \"MicroF1\": 0.1777777777777777, \"MacroF1\": 0.1526026604973973, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.007048 }, { \"step\": 92, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1318681318681318, \"MicroF1\": 0.1318681318681318, \"MacroF1\": 0.1213108980966124, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.018168 }, { \"step\": 138, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1240875912408759, \"MicroF1\": 0.1240875912408759, \"MacroF1\": 0.1187445506554449, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.031716 }, { \"step\": 184, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1256830601092896, \"MicroF1\": 0.1256830601092896, \"MacroF1\": 0.1226298342307158, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.047654 }, { \"step\": 230, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1266375545851528, \"MicroF1\": 0.1266375545851528, \"MacroF1\": 0.1250385204120806, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.065983 }, { \"step\": 276, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1272727272727272, \"MicroF1\": 0.1272727272727272, \"MacroF1\": 0.1242790791814499, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.086242 }, { \"step\": 322, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1339563862928348, \"MicroF1\": 0.1339563862928348, \"MacroF1\": 0.1321003659624602, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.108232 }, { \"step\": 368, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1389645776566757, \"MicroF1\": 0.1389645776566757, \"MacroF1\": 0.1374501146297296, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.131958 }, { \"step\": 414, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1404358353510895, \"MicroF1\": 0.1404358353510895, \"MacroF1\": 0.1403581309694754, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.1574209999999999 }, { \"step\": 460, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1459694989106753, \"MicroF1\": 0.1459694989106753, \"MacroF1\": 0.1456314871072794, \"Memory in 
Mb\": 0.0013656616210937, \"Time in s\": 0.1845859999999999 }, { \"step\": 506, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1386138613861386, \"MicroF1\": 0.1386138613861386, \"MacroF1\": 0.1383381610231494, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.2134849999999999 }, { \"step\": 552, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1397459165154265, \"MicroF1\": 0.1397459165154265, \"MacroF1\": 0.1393865249177789, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.24411 }, { \"step\": 598, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1373534338358459, \"MicroF1\": 0.1373534338358459, \"MacroF1\": 0.1372798104345861, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.276463 }, { \"step\": 644, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1399688958009331, \"MicroF1\": 0.1399688958009331, \"MacroF1\": 0.1401757170901796, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.310533 }, { \"step\": 690, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1378809869375907, \"MicroF1\": 0.1378809869375907, \"MacroF1\": 0.1380151778455332, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.346313 }, { \"step\": 736, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1401360544217687, \"MicroF1\": 0.1401360544217687, \"MacroF1\": 0.1403108892795828, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 0.38382 }, { \"step\": 782, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1421254801536491, \"MicroF1\": 0.1421254801536491, \"MacroF1\": 0.1420930265541123, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.423095 }, { \"step\": 828, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1426844014510278, \"MicroF1\": 0.1426844014510278, \"MacroF1\": 0.1422987455304691, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.464082 }, { \"step\": 874, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.138602520045819, \"MicroF1\": 0.138602520045819, \"MacroF1\": 0.1384535269459527, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 0.506788 }, { \"step\": 920, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1349292709466811, \"MicroF1\": 0.1349292709466811, \"MacroF1\": 0.1348083913046733, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.551195 }, { \"step\": 966, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1336787564766839, \"MicroF1\": 0.1336787564766839, \"MacroF1\": 0.1334917777444527, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.597302 }, { \"step\": 1012, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1325420375865479, \"MicroF1\": 0.1325420375865479, \"MacroF1\": 0.1324936677659038, \"Memory in Mb\": 
0.0013675689697265, \"Time in s\": 0.645131 }, { \"step\": 1058, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1333964049195837, \"MicroF1\": 0.1333964049195837, \"MacroF1\": 0.1331834965440007, \"Memory in Mb\": 0.0013656616210937, \"Time in s\": 0.69466 }, { \"step\": 1104, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1341795104261106, \"MicroF1\": 0.1341795104261106, \"MacroF1\": 0.1340282652950153, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.7459020000000001 }, { \"step\": 1150, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.134029590948651, \"MicroF1\": 0.134029590948651, \"MacroF1\": 0.1340639115051912, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.7988440000000001 }, { \"step\": 1196, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1364016736401673, \"MicroF1\": 0.1364016736401673, \"MacroF1\": 0.1363948420172951, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 0.8534870000000001 }, { \"step\": 1242, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1394037066881547, \"MicroF1\": 0.1394037066881547, \"MacroF1\": 0.1391977238389222, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 0.909824 }, { \"step\": 1288, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1414141414141414, \"MicroF1\": 0.1414141414141414, \"MacroF1\": 0.1411871502321015, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 0.967868 }, { \"step\": 1334, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1432858214553638, \"MicroF1\": 0.1432858214553638, \"MacroF1\": 0.1430255327815666, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 1.027625 }, { \"step\": 1380, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1435823060188542, \"MicroF1\": 0.1435823060188542, \"MacroF1\": 0.1433209000486506, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.089079 }, { \"step\": 1426, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1417543859649122, \"MicroF1\": 0.1417543859649122, \"MacroF1\": 0.1414546655929112, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.152253 }, { \"step\": 1472, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1393609789259007, \"MicroF1\": 0.1393609789259007, \"MacroF1\": 0.1390762971394262, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 1.217139 }, { \"step\": 1518, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1397495056031641, \"MicroF1\": 0.1397495056031641, \"MacroF1\": 0.1395136668589845, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.283725 }, { \"step\": 1564, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1369161868202175, \"MicroF1\": 0.1369161868202175, \"MacroF1\": 0.1366417047439511, 
\"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.352073 }, { \"step\": 1610, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1361093847110006, \"MicroF1\": 0.1361093847110006, \"MacroF1\": 0.1359768388190307, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 1.422125 }, { \"step\": 1656, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1365558912386707, \"MicroF1\": 0.1365558912386707, \"MacroF1\": 0.1363322462377459, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 1.493896 }, { \"step\": 1702, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1393298059964726, \"MicroF1\": 0.1393298059964726, \"MacroF1\": 0.1390129627439909, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 1.5673830000000002 }, { \"step\": 1748, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1419576416714367, \"MicroF1\": 0.1419576416714367, \"MacroF1\": 0.1414719731272364, \"Memory in Mb\": 0.0013656616210937, \"Time in s\": 1.6425530000000002 }, { \"step\": 1794, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1422197434467373, \"MicroF1\": 0.1422197434467373, \"MacroF1\": 0.1419410396611007, \"Memory in Mb\": 0.0013647079467773, \"Time in s\": 1.7194410000000002 }, { \"step\": 1840, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1413811854268624, \"MicroF1\": 0.1413811854268624, \"MacroF1\": 0.1411432976659866, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 1.7980130000000003 }, { \"step\": 1886, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.140053050397878, \"MicroF1\": 0.140053050397878, \"MacroF1\": 0.1397325871382075, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.8782870000000005 }, { \"step\": 1932, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1429311237700673, \"MicroF1\": 0.1429311237700673, \"MacroF1\": 0.1427522922982585, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 1.960245 }, { \"step\": 1978, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1461810824481537, \"MicroF1\": 0.1461810824481537, \"MacroF1\": 0.1459715815160596, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.043928 }, { \"step\": 2024, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1443400889767671, \"MicroF1\": 0.1443400889767671, \"MacroF1\": 0.1441662523776106, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.12929 }, { \"step\": 2070, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1440309328177863, \"MicroF1\": 0.1440309328177863, \"MacroF1\": 0.1438554349712762, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 2.216361 }, { \"step\": 2116, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1446808510638297, \"MicroF1\": 0.1446808510638297, 
\"MacroF1\": 0.1446036231777657, \"Memory in Mb\": 0.0013637542724609, \"Time in s\": 2.305147 }, { \"step\": 2162, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1453031004164738, \"MicroF1\": 0.1453031004164738, \"MacroF1\": 0.1452046591382179, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.395629 }, { \"step\": 2208, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1449932034435885, \"MicroF1\": 0.1449932034435885, \"MacroF1\": 0.1449110985199169, \"Memory in Mb\": 0.0013694763183593, \"Time in s\": 2.487817 }, { \"step\": 2254, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1464713715046604, \"MicroF1\": 0.1464713715046604, \"MacroF1\": 0.146404255341296, \"Memory in Mb\": 0.0013666152954101, \"Time in s\": 2.5817110000000003 }, { \"step\": 2300, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"ImageSegments\", \"Accuracy\": 0.1478903871248368, \"MicroF1\": 0.1478903871248368, \"MacroF1\": 0.1478868852481029, \"Memory in Mb\": 0.0013675689697265, \"Time in s\": 2.6773210000000005 }, { \"step\": 1056, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1582938388625592, \"MicroF1\": 0.1582938388625592, \"MacroF1\": 0.1376212379233521, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.055672 }, { \"step\": 2112, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1657981999052581, \"MicroF1\": 0.1657981999052581, \"MacroF1\": 0.1511045106411843, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.157206 }, { \"step\": 3168, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1701926113040732, \"MicroF1\": 0.1701926113040732, \"MacroF1\": 0.1568151235503963, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 0.304619 }, { \"step\": 4224, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1659957376272791, \"MicroF1\": 0.1659957376272791, \"MacroF1\": 0.1525443315605067, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 0.4978269999999999 }, { \"step\": 5280, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1708656942602765, \"MicroF1\": 0.1708656942602765, \"MacroF1\": 0.1567667911399359, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 0.736912 }, { \"step\": 6336, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1737963693764798, \"MicroF1\": 0.1737963693764798, \"MacroF1\": 0.1613756819597299, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 1.021646 }, { \"step\": 7392, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1752130970098769, \"MicroF1\": 0.1752130970098769, \"MacroF1\": 0.1618940790413477, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 1.351897 }, { \"step\": 8448, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1772226826092103, \"MicroF1\": 0.1772226826092103, \"MacroF1\": 0.163740045170864, 
\"Memory in Mb\": 0.0013818740844726, \"Time in s\": 1.7276090000000002 }, { \"step\": 9504, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1773124276544249, \"MicroF1\": 0.1773124276544249, \"MacroF1\": 0.1637492974453095, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 2.148941 }, { \"step\": 10560, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1790889288758405, \"MicroF1\": 0.1790889288758405, \"MacroF1\": 0.1656421076747495, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 2.615755 }, { \"step\": 11616, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1789926818768833, \"MicroF1\": 0.1789926818768833, \"MacroF1\": 0.1655925383533761, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 3.128148 }, { \"step\": 12672, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.1853050272275274, \"MicroF1\": 0.1853050272275274, \"MacroF1\": 0.182698099884098, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 3.685883 }, { \"step\": 13728, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2479784366576819, \"MicroF1\": 0.2479784366576819, \"MacroF1\": 0.266039368455288, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 4.288806 }, { \"step\": 14784, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2795778935263478, \"MicroF1\": 0.2795778935263478, \"MacroF1\": 0.2822974275171512, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 4.937051 }, { \"step\": 15840, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2761537975882315, \"MicroF1\": 0.2761537975882315, \"MacroF1\": 0.2847375853365436, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 5.631085000000001 }, { \"step\": 16896, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723290914471737, \"MicroF1\": 0.2723290914471737, \"MacroF1\": 0.2859139704285301, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 6.370871000000001 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2720739791655061, \"MicroF1\": 0.2720739791655061, \"MacroF1\": 0.2880143206503877, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 7.156724000000001 }, { \"step\": 19008, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2825274898721523, \"MicroF1\": 0.2825274898721523, \"MacroF1\": 0.2877504429321086, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 7.988744000000001 }, { \"step\": 20064, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2872451776902756, \"MicroF1\": 0.2872451776902756, \"MacroF1\": 0.2866739236661926, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 8.866412000000002 }, { \"step\": 21120, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2830626450116009, \"MicroF1\": 0.2830626450116009, \"MacroF1\": 0.2816476602425525, \"Memory in Mb\": 
0.0013837814331054, \"Time in s\": 9.789517000000002 }, { \"step\": 22176, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2805411499436302, \"MicroF1\": 0.2805411499436302, \"MacroF1\": 0.2786296072528009, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 10.758806000000002 }, { \"step\": 23232, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2797124531875511, \"MicroF1\": 0.2797124531875511, \"MacroF1\": 0.2771941975793341, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 11.774485000000002 }, { \"step\": 24288, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2777205912628155, \"MicroF1\": 0.2777205912628155, \"MacroF1\": 0.2745878480946635, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 12.836246000000004 }, { \"step\": 25344, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2756579726157124, \"MicroF1\": 0.2756579726157124, \"MacroF1\": 0.2723380305202896, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 13.944171000000004 }, { \"step\": 26400, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2739497708246524, \"MicroF1\": 0.2739497708246524, \"MacroF1\": 0.2699690442569991, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 15.098386000000003 }, { \"step\": 27456, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2718994718630486, \"MicroF1\": 0.2718994718630486, \"MacroF1\": 0.2671948532388624, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 16.299309000000004 }, { \"step\": 28512, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723860965942969, \"MicroF1\": 0.2723860965942969, \"MacroF1\": 0.2686965366571338, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 17.546694000000006 }, { \"step\": 29568, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2738187844556431, \"MicroF1\": 0.2738187844556431, \"MacroF1\": 0.2720266804437783, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 18.840271000000005 }, { \"step\": 30624, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2753812493877151, \"MicroF1\": 0.2753812493877151, \"MacroF1\": 0.2748698663810352, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 20.179938000000003 }, { \"step\": 31680, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2780390795163989, \"MicroF1\": 0.2780390795163989, \"MacroF1\": 0.2784141751235631, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 21.565237000000003 }, { \"step\": 32736, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.279670077898274, \"MicroF1\": 0.279670077898274, \"MacroF1\": 0.2802192251245276, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 22.996618000000005 }, { \"step\": 33792, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2808440117190968, \"MicroF1\": 0.2808440117190968, 
\"MacroF1\": 0.2811962745371706, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 24.474295000000005 }, { \"step\": 34848, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2772405085086234, \"MicroF1\": 0.2772405085086234, \"MacroF1\": 0.2781905182864757, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 25.998543000000005 }, { \"step\": 35904, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2739325404562293, \"MicroF1\": 0.2739325404562293, \"MacroF1\": 0.2754200456137155, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 27.569042000000007 }, { \"step\": 36960, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.271246516410076, \"MicroF1\": 0.271246516410076, \"MacroF1\": 0.273332837678202, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 29.18542500000001 }, { \"step\": 38016, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2685518874128633, \"MicroF1\": 0.2685518874128633, \"MacroF1\": 0.2710722002891223, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 30.847968000000005 }, { \"step\": 39072, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.277034117376059, \"MicroF1\": 0.277034117376059, \"MacroF1\": 0.2770619820799866, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 32.556678000000005 }, { \"step\": 40128, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2761731502479627, \"MicroF1\": 0.2761731502479627, \"MacroF1\": 0.2760769006623073, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 34.31145000000001 }, { \"step\": 41184, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2756720005827647, \"MicroF1\": 0.2756720005827647, \"MacroF1\": 0.2754352632972117, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 36.11334600000001 }, { \"step\": 42240, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2740121688486943, \"MicroF1\": 0.2740121688486943, \"MacroF1\": 0.2735946193588543, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 37.96222800000001 }, { \"step\": 43296, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2738422450629403, \"MicroF1\": 0.2738422450629403, \"MacroF1\": 0.2731948869083578, \"Memory in Mb\": 0.0013856887817382, \"Time in s\": 39.85759000000001 }, { \"step\": 44352, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2729588960790061, \"MicroF1\": 0.2729588960790061, \"MacroF1\": 0.2720911653869048, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 41.79925400000001 }, { \"step\": 45408, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2720505648908758, \"MicroF1\": 0.2720505648908758, \"MacroF1\": 0.2708084959373003, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 43.79182700000001 }, { \"step\": 46464, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.271377224888621, 
\"MicroF1\": 0.271377224888621, \"MacroF1\": 0.2698631410415436, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 45.834688000000014 }, { \"step\": 47520, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2723542162082535, \"MicroF1\": 0.2723542162082535, \"MacroF1\": 0.2717062798322285, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 47.92837500000002 }, { \"step\": 48576, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2741327843540916, \"MicroF1\": 0.2741327843540916, \"MacroF1\": 0.2744946340974243, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 50.07231500000002 }, { \"step\": 49632, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2753520984868328, \"MicroF1\": 0.2753520984868328, \"MacroF1\": 0.2765036876430403, \"Memory in Mb\": 0.0013818740844726, \"Time in s\": 52.26665400000002 }, { \"step\": 50688, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2768362696549411, \"MicroF1\": 0.2768362696549411, \"MacroF1\": 0.2786344091273496, \"Memory in Mb\": 0.0013837814331054, \"Time in s\": 54.51115200000002 }, { \"step\": 51744, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2782791875229499, \"MicroF1\": 0.2782791875229499, \"MacroF1\": 0.2805971515128955, \"Memory in Mb\": 0.0013885498046875, \"Time in s\": 56.805577000000014 }, { \"step\": 52800, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Insects\", \"Accuracy\": 0.2891153241538665, \"MicroF1\": 0.2891153241538665, \"MacroF1\": 0.2892953202729756, \"Memory in Mb\": 0.0013866424560546, \"Time in s\": 59.150289000000015 }, { \"step\": 408, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975429975429976, \"MicroF1\": 0.9975429975429976, \"MacroF1\": 0.966040884438882, \"Memory in Mb\": 0.0006122589111328, \"Time in s\": 0.026957 }, { \"step\": 816, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975460122699388, \"MicroF1\": 0.9975460122699388, \"MacroF1\": 0.9879967903427672, \"Memory in Mb\": 0.0006628036499023, \"Time in s\": 0.073338 }, { \"step\": 1224, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975470155355682, \"MicroF1\": 0.9975470155355682, \"MacroF1\": 0.9931179599499376, \"Memory in Mb\": 0.0007133483886718, \"Time in s\": 0.138405 }, { \"step\": 1632, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975475168608215, \"MicroF1\": 0.9975475168608215, \"MacroF1\": 0.9950750839342832, \"Memory in Mb\": 0.0012521743774414, \"Time in s\": 0.2220349999999999 }, { \"step\": 2040, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975478175576264, \"MicroF1\": 0.9975478175576264, \"MacroF1\": 0.9960150346160548, \"Memory in Mb\": 0.0013027191162109, \"Time in s\": 0.3242069999999999 }, { \"step\": 2448, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.9975480179812016, \"MicroF1\": 0.9975480179812016, \"MacroF1\": 0.9965317313935652, \"Memory in Mb\": 0.0013532638549804, \"Time in s\": 0.4452569999999999 }, { \"step\": 2856, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975481611208408, \"MicroF1\": 0.9975481611208408, \"MacroF1\": 0.996842428316928, \"Memory in Mb\": 0.00140380859375, \"Time in s\": 0.58488 }, { \"step\": 3264, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975482684646032, \"MicroF1\": 0.9975482684646032, \"MacroF1\": 0.9970416021996, \"Memory in Mb\": 0.0014543533325195, \"Time in s\": 0.7430509999999999 }, { \"step\": 3672, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975483519476982, \"MicroF1\": 0.9975483519476982, \"MacroF1\": 0.9971755428551424, \"Memory in Mb\": 0.001504898071289, \"Time in s\": 0.9196609999999998 }, { \"step\": 4080, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975484187300808, \"MicroF1\": 0.9975484187300808, \"MacroF1\": 0.9972690115789392, \"Memory in Mb\": 0.0015554428100585, \"Time in s\": 1.1148029999999998 }, { \"step\": 4488, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975484733675062, \"MicroF1\": 0.9975484733675062, \"MacroF1\": 0.9973361791525124, \"Memory in Mb\": 0.0016059875488281, \"Time in s\": 1.3284669999999998 }, { \"step\": 4896, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975485188968336, \"MicroF1\": 0.9975485188968336, \"MacroF1\": 0.9973856025730918, \"Memory in Mb\": 0.0016565322875976, \"Time in s\": 1.56082 }, { \"step\": 5304, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548557420328, \"MicroF1\": 0.997548557420328, \"MacroF1\": 0.997422679833574, \"Memory in Mb\": 0.0017070770263671, \"Time in s\": 1.81175 }, { \"step\": 5712, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975485904395028, \"MicroF1\": 0.9975485904395028, \"MacroF1\": 0.99745094204078, \"Memory in Mb\": 0.0017576217651367, \"Time in s\": 2.0815 }, { \"step\": 6120, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975486190554012, \"MicroF1\": 0.9975486190554012, \"MacroF1\": 0.9974727709453766, \"Memory in Mb\": 0.0018081665039062, \"Time in s\": 2.369724 }, { \"step\": 6528, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975486440937644, \"MicroF1\": 0.9975486440937644, \"MacroF1\": 0.997489815700999, \"Memory in Mb\": 0.0018587112426757, \"Time in s\": 2.6764910000000004 }, { \"step\": 6936, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548666186013, \"MicroF1\": 0.997548666186013, \"MacroF1\": 0.9975032443691146, \"Memory in Mb\": 0.0019092559814453, \"Time in s\": 3.0019130000000005 }, { \"step\": 7344, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 
0.997548685823233, \"MicroF1\": 0.997548685823233, \"MacroF1\": 0.9975139007887864, \"Memory in Mb\": 0.0034246444702148, \"Time in s\": 3.3461420000000004 }, { \"step\": 7752, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487033931104, \"MicroF1\": 0.9975487033931104, \"MacroF1\": 0.9975224052755712, \"Memory in Mb\": 0.0034751892089843, \"Time in s\": 3.708917000000001 }, { \"step\": 8160, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548719205785, \"MicroF1\": 0.997548719205785, \"MacroF1\": 0.9975292209193422, \"Memory in Mb\": 0.0035257339477539, \"Time in s\": 4.090185000000001 }, { \"step\": 8568, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487335123148, \"MicroF1\": 0.9975487335123148, \"MacroF1\": 0.9975346982235256, \"Memory in Mb\": 0.0035762786865234, \"Time in s\": 4.489997000000001 }, { \"step\": 8976, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548746518106, \"MicroF1\": 0.997548746518106, \"MacroF1\": 0.9975391057693664, \"Memory in Mb\": 0.0036268234252929, \"Time in s\": 4.908346000000001 }, { \"step\": 9384, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548758392838, \"MicroF1\": 0.997548758392838, \"MacroF1\": 0.997542651662671, \"Memory in Mb\": 0.0036773681640625, \"Time in s\": 5.3453800000000005 }, { \"step\": 9792, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487692779084, \"MicroF1\": 0.9975487692779084, \"MacroF1\": 0.9975454987794794, \"Memory in Mb\": 0.003727912902832, \"Time in s\": 5.8012950000000005 }, { \"step\": 10200, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487792920874, \"MicroF1\": 0.9975487792920874, \"MacroF1\": 0.9975477757646256, \"Memory in Mb\": 0.0037784576416015, \"Time in s\": 6.275930000000001 }, { \"step\": 10608, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487885358726, \"MicroF1\": 0.9975487885358726, \"MacroF1\": 0.9975495850737114, \"Memory in Mb\": 0.003829002380371, \"Time in s\": 6.769232000000001 }, { \"step\": 11016, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975487970948708, \"MicroF1\": 0.9975487970948708, \"MacroF1\": 0.997551008926056, \"Memory in Mb\": 0.0038795471191406, \"Time in s\": 7.281090000000001 }, { \"step\": 11424, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488050424582, \"MicroF1\": 0.9975488050424582, \"MacroF1\": 0.997552113761348, \"Memory in Mb\": 0.0039300918579101, \"Time in s\": 7.811594 }, { \"step\": 11832, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.99754881244189, \"MicroF1\": 0.99754881244189, \"MacroF1\": 0.9975529536110198, \"Memory in Mb\": 0.0039806365966796, \"Time in s\": 8.360849 }, { \"step\": 12240, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": 
\"Keystroke\", \"Accuracy\": 0.997548819347986, \"MicroF1\": 0.997548819347986, \"MacroF1\": 0.9975535726732964, \"Memory in Mb\": 0.0040311813354492, \"Time in s\": 8.928801 }, { \"step\": 12648, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548825808492, \"MicroF1\": 0.997548825808492, \"MacroF1\": 0.9975540072976318, \"Memory in Mb\": 0.0040817260742187, \"Time in s\": 9.515298 }, { \"step\": 13056, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488318651856, \"MicroF1\": 0.9975488318651856, \"MacroF1\": 0.997554287526727, \"Memory in Mb\": 0.0041322708129882, \"Time in s\": 10.120335 }, { \"step\": 13464, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488375547796, \"MicroF1\": 0.9975488375547796, \"MacroF1\": 0.9975544383040468, \"Memory in Mb\": 0.0041828155517578, \"Time in s\": 10.744063 }, { \"step\": 13872, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488429096676, \"MicroF1\": 0.9975488429096676, \"MacroF1\": 0.9975544804262364, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 11.386615999999998 }, { \"step\": 14280, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488479585404, \"MicroF1\": 0.9975488479585404, \"MacroF1\": 0.9975544312994103, \"Memory in Mb\": 0.0042839050292968, \"Time in s\": 12.048032999999998 }, { \"step\": 14688, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488527269012, \"MicroF1\": 0.9975488527269012, \"MacroF1\": 0.997554305543504, \"Memory in Mb\": 0.0043344497680664, \"Time in s\": 12.728291999999998 }, { \"step\": 15096, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548857237496, \"MicroF1\": 0.997548857237496, \"MacroF1\": 0.9975541154780816, \"Memory in Mb\": 0.0043849945068359, \"Time in s\": 13.427186999999998 }, { \"step\": 15504, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488615106752, \"MicroF1\": 0.9975488615106752, \"MacroF1\": 0.9975538715150368, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 14.144781999999998 }, { \"step\": 15912, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488655647036, \"MicroF1\": 0.9975488655647036, \"MacroF1\": 0.997553582477696, \"Memory in Mb\": 0.004486083984375, \"Time in s\": 14.881121999999998 }, { \"step\": 16320, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488694160182, \"MicroF1\": 0.9975488694160182, \"MacroF1\": 0.997553255861403, \"Memory in Mb\": 0.0045366287231445, \"Time in s\": 15.636483999999998 }, { \"step\": 16728, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488730794524, \"MicroF1\": 0.9975488730794524, \"MacroF1\": 0.997552898047314, \"Memory in Mb\": 0.004587173461914, \"Time in s\": 16.410486 }, { \"step\": 17136, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", 
\"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488765684272, \"MicroF1\": 0.9975488765684272, \"MacroF1\": 0.997552514478575, \"Memory in Mb\": 0.0046377182006835, \"Time in s\": 17.203509999999998 }, { \"step\": 17544, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488798951148, \"MicroF1\": 0.9975488798951148, \"MacroF1\": 0.997552109806108, \"Memory in Mb\": 0.0046882629394531, \"Time in s\": 18.015261 }, { \"step\": 17952, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.997548883070581, \"MicroF1\": 0.997548883070581, \"MacroF1\": 0.997551688009728, \"Memory in Mb\": 0.0047388076782226, \"Time in s\": 18.845896 }, { \"step\": 18360, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488861049076, \"MicroF1\": 0.9975488861049076, \"MacroF1\": 0.9975512524991372, \"Memory in Mb\": 0.0047893524169921, \"Time in s\": 19.695493 }, { \"step\": 18768, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488890073, \"MicroF1\": 0.9975488890073, \"MacroF1\": 0.9975508061984416, \"Memory in Mb\": 0.0048398971557617, \"Time in s\": 20.563922 }, { \"step\": 19176, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.99754889178618, \"MicroF1\": 0.99754889178618, \"MacroF1\": 0.9975503516171184, \"Memory in Mb\": 0.0048904418945312, \"Time in s\": 21.451134 }, { \"step\": 19584, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488944492672, \"MicroF1\": 0.9975488944492672, \"MacroF1\": 0.997549890909789, \"Memory in Mb\": 0.0049409866333007, \"Time in s\": 22.357399 }, { \"step\": 19992, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488970036516, \"MicroF1\": 0.9975488970036516, \"MacroF1\": 0.9975494259267256, \"Memory in Mb\": 0.0049915313720703, \"Time in s\": 23.282656 }, { \"step\": 20400, \"track\": \"Multiclass classification\", \"model\": \"[baseline] Last Class\", \"dataset\": \"Keystroke\", \"Accuracy\": 0.9975488994558556, \"MicroF1\": 0.9975488994558556, \"MacroF1\": 0.9975489582566448, \"Memory in Mb\": 0.0050420761108398, \"Time in s\": 24.227046 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"ImageSegments\", \"bind\": { \"input\": \"select\", \"options\": [ \"ImageSegments\", \"Insects\", \"Keystroke\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": \"Dataset\" } } } ], \"repeat\": { \"row\": [ \"Accuracy\", \"MicroF1\", \"MacroF1\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, 
\"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Multiclass%20classification/#datasets","title":"Datasets","text":"ImageSegments

    Image segments classification.

    This dataset contains features that describe image segments, each belonging to one of 7 classes: brickface, sky, foliage, cement, window, path, and grass.

    Name      ImageSegments
    Task      Multi-class classification
    Samples   2,310
    Features  18
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/segment.csv.zip
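
    As a reference for how these benchmark streams are consumed, here is a minimal sketch using River's standard dataset API; it assumes only that `datasets.ImageSegments` is importable, as the Path above suggests.

    ```python
    from river import datasets

    # ImageSegments is bundled with River, so no download is required.
    dataset = datasets.ImageSegments()

    # Streaming datasets yield (features, label) pairs one at a time.
    for x, y in dataset:
        print(y, list(x)[:3])  # the label plus the first three feature names
        break
    ```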

    Insects

    Insects dataset.

    This dataset has different variants, which are:

    • abrupt_balanced
    • abrupt_imbalanced
    • gradual_balanced
    • gradual_imbalanced
    • incremental-abrupt_balanced
    • incremental-abrupt_imbalanced
    • incremental-reoccurring_balanced
    • incremental-reoccurring_imbalanced
    • incremental_balanced
    • incremental_imbalanced
    • out-of-control

    The number of samples and the difficulty change from one variant to another. The number of classes is always 6, except for the last variant (out-of-control), which has 24.

    Name        Insects
    Task        Multi-class classification
    Samples     52,848
    Features    33
    Classes     6
    Sparse      False
    Path        /home/kulbach/river_data/Insects/INSECTS-abrupt_balanced_norm.arff
    URL         http://sites.labic.icmc.usp.br/vsouza/repository/creme/INSECTS-abrupt_balanced_norm.arff
    Size        15.66 MB
    Downloaded  True
    Variant     abrupt_balanced

    Keystroke

    CMU keystroke dataset.

    Users are asked to type in a password; the goal is to determine which user is typing it.

    The only difference from the original dataset is that the \"sessionIndex\" and \"rep\" attributes have been dropped.

    Name        Keystroke
    Task        Multi-class classification
    Samples     20,400
    Features    31
    Sparse      False
    Path        /home/kulbach/river_data/Keystroke/DSL-StrongPasswordData.csv
    URL         http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv
    Size        4.45 MB
    Downloaded  True
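
    The \"[baseline] Last Class\" rows in the results above come from a model that always predicts the most recently seen label; in River this is `dummy.NoChangeClassifier`. A minimal sketch of the predict-then-learn loop that produces such curves (exact scores and timings will differ by machine and River version):

    ```python
    from river import datasets, dummy, evaluate, metrics

    # Progressive validation: predict on each sample, then learn from it,
    # while tracking a running metric.
    evaluate.progressive_val_score(
        dataset=datasets.Keystroke(),      # downloaded on first use
        model=dummy.NoChangeClassifier(),  # always predicts the last seen class
        metric=metrics.Accuracy(),
        print_every=5_000,                 # periodic checkpoints, as in the table
    )
    ```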

    "},{"location":"benchmarks/Multiclass%20classification/#parameters","title":"Parameters","text":"
    variant
        Indicates which variant of the dataset to load.
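
    This parameter belongs to the Insects dataset. A sketch of selecting a non-default variant (the benchmark above uses `abrupt_balanced`):

    ```python
    from river import datasets

    # Any of the variant names listed in the Insects description is accepted.
    dataset = datasets.Insects(variant="incremental_balanced")
    ```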
    "},{"location":"benchmarks/Multiclass%20classification/#models","title":"Models","text":"Naive Bayes

    GaussianNB ()
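
    GaussianNB keeps one running Gaussian per class and feature, so it needs no configuration. A sketch of the one-sample-at-a-time API shared by all the models below:

    ```python
    from river import datasets, naive_bayes

    model = naive_bayes.GaussianNB()

    for x, y in datasets.ImageSegments():
        y_pred = model.predict_one(x)  # None before any data has been seen
        model.learn_one(x, y)          # update the per-class Gaussians in place
    ```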

    Hoeffding Tree

    HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)
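
    The dump above maps one-to-one onto constructor keywords; a sketch of building the same configuration (the values shown also happen to be the defaults in this River version):

    ```python
    from river import tree

    model = tree.HoeffdingTreeClassifier(
        grace_period=200,       # samples observed between split attempts
        delta=1e-07,            # significance level of the Hoeffding bound
        tau=0.05,               # tie-breaking threshold between split candidates
        leaf_prediction="nba",  # naive Bayes adaptive predictions at the leaves
    )
    ```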

    Hoeffding Adaptive Tree

    HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=True\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=42\n)
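
    The adaptive variant differs from the plain Hoeffding tree by monitoring each subtree with a drift detector and growing a background replacement when drift is flagged. A sketch spelling out the ADWIN detector from the dump:

    ```python
    from river import drift, tree

    model = tree.HoeffdingAdaptiveTreeClassifier(
        drift_detector=drift.ADWIN(delta=0.002),  # flags distribution changes
        drift_window_threshold=300,  # samples before a background subtree can swap in
        seed=42,
    )
    ```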

    Adaptive Random Forest

    []
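
    The model dump rendered empty here. For reference only, and assuming a recent River version where the adaptive random forest lives in the `forest` module, it would typically be built as below; the exact configuration used in this benchmark is not recoverable from the empty dump.

    ```python
    from river import forest

    # Assumption: import path and defaults per recent River releases.
    model = forest.ARFClassifier(n_models=10, seed=42)
    ```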

    Streaming Random Patches

    SRPClassifier (\n  model=HoeffdingTreeClassifier (\n    grace_period=50\n    max_depth=inf\n    split_criterion=\"info_gain\"\n    delta=0.01\n    tau=0.05\n    leaf_prediction=\"nba\"\n    nb_threshold=0\n    nominal_attributes=None\n    splitter=GaussianSplitter (\n      n_splits=10\n    )\n    binary_split=False\n    max_size=100.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n  n_models=10\n  subspace_size=0.6\n  training_method=\"patches\"\n  lam=6\n  drift_detector=ADWIN (\n    delta=1e-05\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  warning_detector=ADWIN (\n    delta=0.0001\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  disable_detector=\"off\"\n  disable_weighted_vote=False\n  seed=None\n  metric=Accuracy (\n    cm=ConfusionMatrix (\n      classes=[]\n    )\n  )\n)
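
    Streaming Random Patches trains each member on a random subset of the features and a Poisson-resampled stream of instances. A sketch matching the key values in the dump:

    ```python
    from river import ensemble, tree

    model = ensemble.SRPClassifier(
        model=tree.HoeffdingTreeClassifier(grace_period=50, delta=0.01),
        n_models=10,
        subspace_size=0.6,          # fraction of features given to each member
        training_method="patches",  # subsample both features and instances
        lam=6,                      # Poisson rate for online resampling
    )
    ```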

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNClassifier (\n    n_neighbors=5\n    window_size=100\n    min_distance_keep=0.\n    weighted=True\n    cleanup_every=0\n    distance_func=functools.partial(, p=2)\n    softmax=False\n  )\n)\n
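
    The truncated `functools.partial(, p=2)` above is an HTML-stripping artifact; the default is presumably a Minkowski distance with p=2, i.e. Euclidean. A sketch of the pipeline using River's `|` shorthand (the keyword names follow the dump above and may differ in later River releases):

    ```python
    from river import neighbors, preprocessing

    # `a | b` is shorthand for compose.Pipeline(a, b): scale, then classify.
    model = preprocessing.StandardScaler() | neighbors.KNNClassifier(
        n_neighbors=5,
        window_size=100,  # keep only the 100 most recent samples
        weighted=True,    # weight neighbor votes by inverse distance
    )
    ```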

ADWIN Bagging

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
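
    ADWIN Bagging is online bagging with an ADWIN detector attached to each of the ten Hoeffding trees above; a member is reset when its detector fires. A sketch:

    ```python
    from river import ensemble, tree

    model = ensemble.ADWINBaggingClassifier(
        model=tree.HoeffdingTreeClassifier(grace_period=200),
        n_models=10,
        seed=42,  # assumption: the dump does not record the seed used
    )
    ```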


AdaBoost

    [HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  
memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n), HoeffdingTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n)]
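
    Online AdaBoost re-weights each incoming sample for every member via a Poisson draw whose rate tracks the member's running error. A sketch with the same base learner:

    ```python
    from river import ensemble, tree

    model = ensemble.AdaBoostClassifier(
        model=tree.HoeffdingTreeClassifier(grace_period=200),
        n_models=10,
        seed=42,  # assumption: not recorded in the dump above
    )
    ```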


Bagging

    [HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n  
  clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n), HoeffdingAdaptiveTreeClassifier (\n  grace_period=200\n  max_depth=inf\n  split_criterion=\"info_gain\"\n  delta=1e-07\n  tau=0.05\n  leaf_prediction=\"nba\"\n  nb_threshold=0\n  nominal_attributes=None\n  splitter=GaussianSplitter (\n    n_splits=10\n  )\n  bootstrap_sampling=False\n  drift_window_threshold=300\n  drift_detector=ADWIN (\n    delta=0.002\n    clock=32\n    max_buckets=5\n    min_window_length=5\n    grace_period=10\n  )\n  switch_significance=0.05\n  binary_split=False\n  max_size=100.\n  memory_estimate_period=1000000\n  stop_mem_management=False\n  remove_poor_attrs=False\n  merit_preprune=True\n  seed=None\n)]
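
    Plain online bagging over ten Hoeffding Adaptive Trees; note `bootstrap_sampling=False` on the trees, since the ensemble already resamples each instance. A sketch:

    ```python
    from river import ensemble, tree

    model = ensemble.BaggingClassifier(
        model=tree.HoeffdingAdaptiveTreeClassifier(bootstrap_sampling=False),
        n_models=10,
        seed=42,  # assumption: not recorded in the dump above
    )
    ```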


    Leveraging Bagging

    [10 × HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    )]

    (ten identical members; the repr is printed once for readability)
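    The ten identical Hoeffding trees above are the members of a leveraging bagging ensemble. A minimal sketch of how such a model can be assembled with river; `n_models=10` is read off the list length, while the `seed` is illustrative since the page does not print it:

```python
from river import ensemble, tree

# Leveraging bagging over ten Hoeffding trees, matching the member reprs above.
model = ensemble.LeveragingBaggingClassifier(
    model=tree.HoeffdingTreeClassifier(grace_period=200, delta=1e-07),
    n_models=10,
    seed=42,  # illustrative; not read off the page
)
```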


    Stacking

    [Pipeline (
      StandardScaler (
        with_std=True
      ),
      SoftmaxRegression (
        optimizer=SGD (
          lr=Constant (
            learning_rate=0.01
          )
        )
        loss=CrossEntropy (
          class_weight={}
        )
        l2=0
      )
    ), GaussianNB (), HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), Pipeline (
      StandardScaler (
        with_std=True
      ),
      KNNClassifier (
        n_neighbors=5
        window_size=100
        min_distance_keep=0.
        weighted=True
        cleanup_every=0
        distance_func=functools.partial(<function minkowski_distance>, p=2)
        softmax=False
      )
    )]
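    These four models are the base learners of the stack. A minimal sketch of an equivalent construction; the `meta_classifier` below is an assumption, since the page only prints the base models:

```python
from river import ensemble, linear_model, naive_bayes, neighbors, preprocessing, tree

base_models = [
    preprocessing.StandardScaler() | linear_model.SoftmaxRegression(),
    naive_bayes.GaussianNB(),
    tree.HoeffdingTreeClassifier(),
    preprocessing.StandardScaler() | neighbors.KNNClassifier(),
]
# The meta-learner is illustrative, not read off the page.
model = ensemble.StackingClassifier(
    base_models,
    meta_classifier=linear_model.SoftmaxRegression(),
)
```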

    Voting

    VotingClassifier (
      models=[Pipeline (
      StandardScaler (
        with_std=True
      ),
      SoftmaxRegression (
        optimizer=SGD (
          lr=Constant (
            learning_rate=0.01
          )
        )
        loss=CrossEntropy (
          class_weight={}
        )
        l2=0
      )
    ), GaussianNB (), HoeffdingTreeClassifier (
      grace_period=200
      max_depth=inf
      split_criterion="info_gain"
      delta=1e-07
      tau=0.05
      leaf_prediction="nba"
      nb_threshold=0
      nominal_attributes=None
      splitter=GaussianSplitter (
        n_splits=10
      )
      binary_split=False
      max_size=100.
      memory_estimate_period=1000000
      stop_mem_management=False
      remove_poor_attrs=False
      merit_preprune=True
    ), Pipeline (
      StandardScaler (
        with_std=True
      ),
      KNNClassifier (
        n_neighbors=5
        window_size=100
        min_distance_keep=0.
        weighted=True
        cleanup_every=0
        distance_func=functools.partial(<function minkowski_distance>, p=2)
        softmax=False
      )
    )]
      use_probabilities=True
    )
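    The same four base models, this time combined by voting. A sketch matching the repr above; `use_probabilities=True` makes the ensemble combine the members' predicted probabilities rather than their hard votes:

```python
from river import ensemble, linear_model, naive_bayes, neighbors, preprocessing, tree

model = ensemble.VotingClassifier(
    models=[
        preprocessing.StandardScaler() | linear_model.SoftmaxRegression(),
        naive_bayes.GaussianNB(),
        tree.HoeffdingTreeClassifier(),
        preprocessing.StandardScaler() | neighbors.KNNClassifier(),
    ],
    use_probabilities=True,  # combine probabilities instead of hard votes
)
```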

    [baseline] Last Class

    NoChangeClassifier ()
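    The last-class baseline simply repeats the most recently seen label. A sketch of how any model on this page can be scored with progressive (test-then-train) validation; the dataset and metric below are illustrative choices, not read off the page:

```python
from river import datasets, dummy, evaluate, metrics

# Test-then-train evaluation of the last-class baseline.
evaluate.progressive_val_score(
    dataset=datasets.ImageSegments(),  # illustrative multiclass stream
    model=dummy.NoChangeClassifier(),
    metric=metrics.Accuracy(),
)
```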


    "},{"location":"benchmarks/Multiclass%20classification/#environment","title":"Environment","text":"
    Python implementation: CPython
    Python version       : 3.11.5
    IPython version      : 8.15.0

    river       : 0.19.0
    numpy       : 1.26.0
    scikit-learn: 1.3.0
    pandas      : 2.1.0
    scipy       : 1.11.2

    Compiler    : GCC 11.4.0
    OS          : Linux
    Release     : 6.2.0-1011-azure
    Machine     : x86_64
    Processor   : x86_64
    CPU cores   : 2
    Architecture: 64bit
    "},{"location":"benchmarks/Regression/","title":"Regression","text":"TableChart Model Dataset MAE RMSE R2 Memory in Mb Time in s Adaptive Model Rules ChickWeights 24.0925 37.1369 0.719675 0.0469542 5.03028 Adaptive Model Rules TrumpApproval 1.40204 2.43644 -1.02749 0.114429 5.76779 Adaptive Random Forest ChickWeights 25.9648 40.6034 0.6649 1.18613 32.8286 Adaptive Random Forest TrumpApproval 0.801133 2.11603 -0.529292 1.28362 54.6942 Bagging ChickWeights 23.0595 36.5862 0.727928 0.643575 19.8658 Bagging TrumpApproval 0.904415 2.23483 -0.705833 1.33501 42.6904 Exponentially Weighted Average ChickWeights 120.54 139.462 -2.95334 0.183387 12.3806 Exponentially Weighted Average TrumpApproval 40.7536 40.7895 -567.257 0.316642 30.0432 Hoeffding Adaptive Tree ChickWeights 23.2557 37.579 0.712962 0.0946112 5.75782 Hoeffding Adaptive Tree TrumpApproval 0.910675 2.2343 -0.705019 0.138225 6.69917 Hoeffding Tree ChickWeights 23.0842 36.6638 0.726773 0.0440512 4.02236 Hoeffding Tree TrumpApproval 0.949745 2.24815 -0.726224 0.148639 9.13796 Linear Regression ChickWeights 23.8353 37.0287 0.721307 0.00421047 2.10647 Linear Regression TrumpApproval 1.3486 4.12828 -4.82084 0.00497341 3.6327 Linear Regression with l1 regularization ChickWeights 23.868 37.0773 0.720575 0.00444126 1.13401 Linear Regression with l1 regularization TrumpApproval 1.21585 4.06821 -4.65269 0.0052042 2.06156 Linear Regression with l2 regularization ChickWeights 25.5204 38.6553 0.696284 0.00423336 1.11618 Linear Regression with l2 regularization TrumpApproval 1.99918 4.40997 -5.64232 0.0049963 1.98704 Passive-Aggressive Regressor, mode 1 ChickWeights 24.2339 37.5576 0.713289 0.00345898 1.33977 Passive-Aggressive Regressor, mode 1 TrumpApproval 4.90639 6.6656 -14.1749 0.00443554 2.18425 Passive-Aggressive Regressor, mode 2 ChickWeights 99.5681 141.4 -3.06396 0.00345898 1.99155 Passive-Aggressive Regressor, mode 2 TrumpApproval 31.1288 34.4257 -403.774 0.00443554 2.19594 River MLP ChickWeights 49.5783 77.9026 -0.233541 0.0123129 18.4913 River MLP TrumpApproval 1.59139 5.147 -8.04808 0.0133505 30.7873 Stochastic Gradient Tree ChickWeights 68.1198 79.5649 -0.286746 1.12059 9.48214 Stochastic Gradient Tree TrumpApproval 9.43874 17.9468 -109.008 3.08244 24.6638 Streaming Random Patches ChickWeights 23.5162 38.2072 0.703285 0.558536 50.7829 Streaming Random Patches TrumpApproval 0.640561 1.97134 -0.32731 1.05934 101.873 [baseline] Mean predictor ChickWeights 49.4914 70.2457 -0.00297194 0.000490189 0.529127 [baseline] Mean predictor TrumpApproval 1.56814 2.20374 -0.658701 0.000490189 0.8379 k-Nearest Neighbors ChickWeights 22.9043 34.7945 0.753924 0.0461216 4.35991 k-Nearest Neighbors TrumpApproval 0.493975 1.50807 0.223232 0.0660038 9.48546


    { \"$schema\": \"https://vega.github.io/schema/vega-lite/v5.json\", \"data\": { \"values\": [ { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 32.364564569910335, \"RMSE\": 32.97872020361878, \"R2\": -1398.9905780691188, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.003051 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.977933628105813, \"RMSE\": 25.38362603225939, \"R2\": -681.3960169454474, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.0083 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 16.216942910977988, \"RMSE\": 20.82463881551788, \"R2\": -300.18738429635704, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.014943 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.450847696587651, \"RMSE\": 18.04722398474583, \"R2\": -255.42929659358052, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.023007 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.8883407017882, \"RMSE\": 18.699705575978975, \"R2\": -67.26141846932143, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.032524 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.481406471145082, \"RMSE\": 17.562600262725994, \"R2\": -24.95549321582236, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.043462 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 10.781108661026623, \"RMSE\": 16.493572764286025, \"R2\": -14.34295652053857, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.055844 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.717703273355898, \"RMSE\": 15.46585610846664, \"R2\": -11.231382330967593, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.069657 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.826979124235404, \"RMSE\": 14.601347274688614, \"R2\": -8.118374730562003, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.084902 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.34720326953035, \"RMSE\": 13.931298318002057, \"R2\": -4.796525071049026, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.101965 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.037877082888846, \"RMSE\": 13.41080638289134, \"R2\": -3.136902586442697, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.120474 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.656433924417384, \"RMSE\": 12.898278689410905, \"R2\": -2.1275837609073576, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.140407 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.307942088554156, \"RMSE\": 12.437137940834392, \"R2\": -1.3553409371460328, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.161783 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression\", 
\"dataset\": \"ChickWeights\", \"MAE\": 7.037714222368383, \"RMSE\": 12.042115748312936, \"R2\": -0.8765797740197239, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.184594 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.129031762481882, \"RMSE\": 11.913307711374014, \"R2\": -0.4764258010150459, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.208855 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.184514897799321, \"RMSE\": 11.77636646389892, \"R2\": -0.1632359842489146, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.23453 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.115123062484693, \"RMSE\": 11.572523949602724, \"R2\": 0.0802677819230903, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.261536 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.006474290899419, \"RMSE\": 11.366304822809298, \"R2\": 0.2942397460202306, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.289991 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.129008217053805, \"RMSE\": 11.440940870898142, \"R2\": 0.4105268603250641, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.319876 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.62421928608864, \"RMSE\": 12.045617517752785, \"R2\": 0.4279229250366536, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.351191 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.729844807682863, \"RMSE\": 12.068921171072352, \"R2\": 0.5087708730950672, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.383941 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.873703200353374, \"RMSE\": 12.23342730557754, \"R2\": 0.5938836560989084, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.4181169999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 7.894340397324045, \"RMSE\": 12.218207932001455, \"R2\": 0.6481596201604607, \"Memory in Mb\": 0.0041303634643554, \"Time in s\": 0.4537249999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.479294890454037, \"RMSE\": 13.126132095776898, \"R2\": 0.6289858847198173, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.4907699999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 8.914096443559163, \"RMSE\": 13.971715828104037, \"R2\": 0.6301108693673194, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.529253 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.123963222012373, \"RMSE\": 14.305597328390173, \"R2\": 0.6641373552910966, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.569186 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.083791720841957, \"RMSE\": 14.24670706195338, \"R2\": 0.7111028643570333, \"Memory in Mb\": 
0.0042104721069335, \"Time in s\": 0.610599 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 9.589205789771716, \"RMSE\": 14.956254664628933, \"R2\": 0.716435491318643, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.653398 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 10.6480954226875, \"RMSE\": 17.335456678654833, \"R2\": 0.654294294845865, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.697581 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.061417554605155, \"RMSE\": 17.89416376383148, \"R2\": 0.6847745473646168, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.743241 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.240970714084437, \"RMSE\": 17.96809449059472, \"R2\": 0.7153933828209167, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.7902760000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 11.393007763406809, \"RMSE\": 18.07679096199219, \"R2\": 0.7381404893604309, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.8386990000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.251680566634816, \"RMSE\": 19.3891577397662, \"R2\": 0.7074601934283691, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.8885620000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 12.75183556333798, \"RMSE\": 20.473547618215623, \"R2\": 0.7001526953506461, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.93984 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 13.120977867369843, \"RMSE\": 21.06680160073653, \"R2\": 0.7191139726408686, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 0.992561 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 13.243904830041805, \"RMSE\": 21.04850718241465, \"R2\": 0.7385587649833809, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.046701 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 14.114140715648691, \"RMSE\": 22.50284796635845, \"R2\": 0.7222415724766076, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.102244 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 14.877176135328032, \"RMSE\": 23.91912678123439, \"R2\": 0.7054123344015044, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.159239 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 15.420211528669606, \"RMSE\": 24.826921056607983, \"R2\": 0.71797392321154, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.2176479999999998 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 15.588380621816588, \"RMSE\": 24.89946727120753, \"R2\": 0.7364018543257719, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.277456 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": 
\"ChickWeights\", \"MAE\": 16.102138383202178, \"RMSE\": 25.5012042182244, \"R2\": 0.73526123694725, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.338723 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 17.19666374070754, \"RMSE\": 27.602141070792264, \"R2\": 0.7086782414730581, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.4014049999999998 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 17.97145397683086, \"RMSE\": 28.90516312323801, \"R2\": 0.7179019616037816, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.4654839999999998 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 18.29792978215437, \"RMSE\": 29.184271659667463, \"R2\": 0.728186505594778, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.531041 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 18.74962346435519, \"RMSE\": 29.70957841185893, \"R2\": 0.7350194821983969, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.597992 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 19.63242676502778, \"RMSE\": 31.145843529930996, \"R2\": 0.717244444772776, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.666343 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 20.352340621675207, \"RMSE\": 32.13418072986834, \"R2\": 0.7159654376794024, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.736162 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 21.13777985928475, \"RMSE\": 33.324214910779105, \"R2\": 0.7253645356808669, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.807369 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 21.30552841968683, \"RMSE\": 33.32197733500869, \"R2\": 0.7367405849979859, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.880014 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.28842093535661, \"RMSE\": 34.93191609140748, \"R2\": 0.7196038878445231, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 1.954091 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 22.98790385213596, \"RMSE\": 35.84862508987654, \"R2\": 0.7176082524890277, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 2.029559 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"ChickWeights\", \"MAE\": 23.835304128485923, \"RMSE\": 37.028707868367256, \"R2\": 0.7213067136137974, \"Memory in Mb\": 0.0042104721069335, \"Time in s\": 2.106474 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.20376765378399, \"RMSE\": 26.086393589237737, \"R2\": -1595.1823041445402, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.006002 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.037845165976735, \"RMSE\": 19.010285970857197, \"R2\": -144.292318589198, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 
0.015135 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.507970876430278, \"RMSE\": 16.25440462414082, \"R2\": -142.20309184289852, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.026737 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.557907896578074, \"RMSE\": 14.248619966820993, \"R2\": -109.38231735939875, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.04106 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.00119890237694, \"RMSE\": 12.784639272000032, \"R2\": -54.757167221204185, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.058228 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.9642139928158, \"RMSE\": 11.706689332840265, \"R2\": -38.660847151370525, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.078171 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.158017211594616, \"RMSE\": 10.855926078196225, \"R2\": -34.244125473921144, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.100823 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.477712824897756, \"RMSE\": 10.159717829752896, \"R2\": -26.22221848793916, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.126265 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.024407839120485, \"RMSE\": 9.597258286357787, \"R2\": -20.33422740878964, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.154466 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.585662202332267, \"RMSE\": 9.108145701438088, \"R2\": -18.272229834363905, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.185533 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.26060909213789, \"RMSE\": 8.692057179629266, \"R2\": -17.933082537971817, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.219335 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.9717866152166015, \"RMSE\": 8.326248244302885, \"R2\": -16.503720237291063, \"Memory in Mb\": 0.0048131942749023, \"Time in s\": 0.255864 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.713770572650404, \"RMSE\": 8.00217875002923, \"R2\": -15.385557669694744, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.2951479999999999 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.519033617242816, \"RMSE\": 7.718418241237259, \"R2\": -14.960370233444367, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.3371889999999999 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3459125962612686, \"RMSE\": 7.4642342223287805, \"R2\": -13.679347912302555, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.382005 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 
3.2142611116185447, \"RMSE\": 7.238080925352425, \"R2\": -13.486769876410833, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.429554 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.0579195410777067, \"RMSE\": 7.023783188903098, \"R2\": -13.41588572736102, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.479826 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.945682332324278, \"RMSE\": 6.834004497968132, \"R2\": -12.75946181118139, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.532818 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.834623050495076, \"RMSE\": 6.655478314361804, \"R2\": -12.501407484394289, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.588545 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.750580257859316, \"RMSE\": 6.492898516140861, \"R2\": -12.2130072039923, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.647075 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6430441633874877, \"RMSE\": 6.337629923196658, \"R2\": -12.005235448499764, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.708317 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.55209658354648, \"RMSE\": 6.194505226365406, \"R2\": -11.19965041203295, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.7723099999999999 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.456089002686458, \"RMSE\": 6.059000096335146, \"R2\": -10.068304379054997, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.8388639999999999 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.366054305985814, \"RMSE\": 5.93188196569365, \"R2\": -9.364683952709628, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.907942 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2878529832492878, \"RMSE\": 5.812913918334153, \"R2\": -8.7442989221461, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 0.979676 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2263077878678064, \"RMSE\": 5.701877933590318, \"R2\": -8.391958889485423, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.054282 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.159275760054389, \"RMSE\": 5.596308740310266, \"R2\": -8.01424271274666, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.13157 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.121286703179314, \"RMSE\": 5.500929056902255, \"R2\": -7.917124287747498, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.211538 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.058130800812745, \"RMSE\": 5.4056516105350205, \"R2\": -7.823875188349783, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.294212 }, { 
\"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.010772210317983, \"RMSE\": 5.316298216027806, \"R2\": -7.440165070210115, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.379521 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9712240547218984, \"RMSE\": 5.232121316388296, \"R2\": -7.05039272692726, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.467481 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918906116628168, \"RMSE\": 5.150155111235484, \"R2\": -6.654334565315682, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.558 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.87787066207154, \"RMSE\": 5.072802363597129, \"R2\": -6.372735616761029, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.651027 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8268195769848845, \"RMSE\": 4.997758130035794, \"R2\": -6.2693000939147145, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.746732 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.786028440025988, \"RMSE\": 4.9266674679383895, \"R2\": -6.249499636750513, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.84509 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7407901109286728, \"RMSE\": 4.857855812572241, \"R2\": -6.20325679847918, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 1.946268 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6980813320245582, \"RMSE\": 4.791872643159282, \"R2\": -6.00468916158508, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.050093 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6641755726043943, \"RMSE\": 4.729168490426344, \"R2\": -5.896482494172518, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.156434 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6304504193200038, \"RMSE\": 4.6685949390255965, \"R2\": -5.751055148180852, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.265329 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6024144632936517, \"RMSE\": 4.610765602340218, \"R2\": -5.644394777378336, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.376883 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5772046524793362, \"RMSE\": 4.555342563217192, \"R2\": -5.556843815680599, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.490967 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5504230286371177, \"RMSE\": 4.501494961913348, \"R2\": -5.462190804899245, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.607626 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5213504760602443, \"RMSE\": 4.449264316210896, 
\"R2\": -5.302224210455831, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.72689 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4920620594434295, \"RMSE\": 4.398585386750051, \"R2\": -5.128866413959423, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.848697 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4631535468073946, \"RMSE\": 4.3495699730724, \"R2\": -5.018290064232882, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 2.973079 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4376774845864433, \"RMSE\": 4.302379067062498, \"R2\": -4.985157602999735, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.100144 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.415413283450155, \"RMSE\": 4.2569033587476754, \"R2\": -4.909007968017405, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.229628 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3929189597034646, \"RMSE\": 4.212790914002432, \"R2\": -4.847686152244137, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.361574 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3689716642677323, \"RMSE\": 4.1697584324400925, \"R2\": -4.840094251784054, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.49595 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.348598310616665, \"RMSE\": 4.128277744647548, \"R2\": -4.8208398605179, \"Memory in Mb\": 0.0049734115600585, \"Time in s\": 3.6327 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 32.42675747760146, \"RMSE\": 33.032143455333795, \"R2\": -1403.530028209614, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.002172 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.11671120534681, \"RMSE\": 25.467535638550565, \"R2\": -685.9150105173057, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.005938 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.40645850052153, \"RMSE\": 20.90890407573329, \"R2\": -302.6297778360383, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.010657 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.633013937743208, \"RMSE\": 18.123648450153382, \"R2\": -257.60569409037487, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.016312 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.09340740686418, \"RMSE\": 18.75532087846616, \"R2\": -67.66805855020911, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.022822 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.723217014070244, \"RMSE\": 17.64468538999345, \"R2\": -25.19868487320792, \"Memory in 
Mb\": 0.0043611526489257, \"Time in s\": 0.030046 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.995180265837302, \"RMSE\": 16.56912334002292, \"R2\": -14.483838555564008, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.037972 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.883907021821408, \"RMSE\": 15.530287677810511, \"R2\": -11.333507781967656, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.046598 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.972176235897166, \"RMSE\": 14.66146594146288, \"R2\": -8.193616152032533, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.055927 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.50780758363674, \"RMSE\": 13.99831063395296, \"R2\": -4.852424068253989, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.066187 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.189643323772653, \"RMSE\": 13.479618530659062, \"R2\": -3.1794651999709123, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.077165 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.801679323915957, \"RMSE\": 12.961634417305982, \"R2\": -2.158384304610562, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.088845 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.451861020480359, \"RMSE\": 12.498785048420814, \"R2\": -1.3787482214976663, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.101232 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.149646459280303, \"RMSE\": 12.093459492377487, \"R2\": -0.8926161646086501, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.114319 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.229782522017506, \"RMSE\": 11.96532542528415, \"R2\": -0.4893471433346175, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.128105 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.272512426798904, \"RMSE\": 11.818048782353436, \"R2\": -0.1714850789711801, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1426069999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.196746625780547, \"RMSE\": 11.610365671998538, \"R2\": 0.0742429673312855, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1578139999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.1064479168500405, \"RMSE\": 11.42347112116664, \"R2\": 0.2871227171728154, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.173728 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 
regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.187961051781539, \"RMSE\": 11.470757896418933, \"R2\": 0.4074503232991815, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.1903399999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.669011107337858, \"RMSE\": 12.056664202246258, \"R2\": 0.426873173509426, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2076519999999999 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.786810512364482, \"RMSE\": 12.097059810994589, \"R2\": 0.5064776054701067, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2256619999999999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.967587416991401, \"RMSE\": 12.312376354870244, \"R2\": 0.5886249568088757, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2443719999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 7.942191618805254, \"RMSE\": 12.251768500136135, \"R2\": 0.6462241186586895, \"Memory in Mb\": 0.0043611526489257, \"Time in s\": 0.2637739999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.532657015260138, \"RMSE\": 13.159069559279288, \"R2\": 0.6271215737447722, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.2838889999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 8.974527826218258, \"RMSE\": 14.016709692996267, \"R2\": 0.6277246858047626, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3047049999999999 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.187875132430849, \"RMSE\": 14.367497338174372, \"R2\": 0.6612245262436964, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3262349999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.146460078204475, \"RMSE\": 14.316362398212211, \"R2\": 0.7082709930315267, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3484709999999999 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.641370323857412, \"RMSE\": 15.001693346690402, \"R2\": 0.7147098761119641, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3714179999999999 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.700825117113602, \"RMSE\": 17.38383679543193, \"R2\": 0.6523619983610591, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.3950729999999999 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.121905143066762, \"RMSE\": 17.96551370253039, \"R2\": 0.6822557197544588, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4194319999999999 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": 
\"ChickWeights\", \"MAE\": 11.300130820443067, \"RMSE\": 18.038310133249198, \"R2\": 0.7131646675929553, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4444929999999999 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.446939695127732, \"RMSE\": 18.13441953669637, \"R2\": 0.7364682185789984, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4702589999999999 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.306713664516929, \"RMSE\": 19.446501901626007, \"R2\": 0.7057272396553849, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.4967349999999999 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.8047145381729, \"RMSE\": 20.530886427306594, \"R2\": 0.6984708214392588, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.523921 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.174415073298738, \"RMSE\": 21.133761140382827, \"R2\": 0.7173255769005842, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.551805 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.2883642577537, \"RMSE\": 21.10340115690396, \"R2\": 0.7371933225224319, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.580392 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.156717848187574, \"RMSE\": 22.549679209142333, \"R2\": 0.7210842693854467, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6096860000000001 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.91944953335544, \"RMSE\": 23.967687063528587, \"R2\": 0.7042149845564116, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6396860000000001 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.467166242517186, \"RMSE\": 24.886955839016704, \"R2\": 0.7166083213097005, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.6704000000000001 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.631989433801651, \"RMSE\": 24.954278611820005, \"R2\": 0.735240056765428, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.7018220000000001 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.140858755557055, \"RMSE\": 25.549476814516595, \"R2\": 0.7342580119275103, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.7339490000000001 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.234115417053438, \"RMSE\": 27.64913352119068, \"R2\": 0.7076854506057617, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.766781 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.0152369206823, \"RMSE\": 28.967470484053976, 
\"R2\": 0.7166844816496443, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.800319 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.33784663215877, \"RMSE\": 29.23326362096376, \"R2\": 0.727273146935064, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.834558 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 18.785497932712666, \"RMSE\": 29.755293652524703, \"R2\": 0.7342033839127224, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.86951 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.668627686193567, \"RMSE\": 31.19471320280161, \"R2\": 0.7163564282233008, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.90517 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.38947136061499, \"RMSE\": 32.18441644636668, \"R2\": 0.7150766748346216, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.941543 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.174247281712567, \"RMSE\": 33.375214332088184, \"R2\": 0.7245232875146275, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 0.978621 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.335113723003413, \"RMSE\": 33.369412847265615, \"R2\": 0.7359905254430201, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.016405 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.319908918673335, \"RMSE\": 34.98038586656285, \"R2\": 0.7188252208291055, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.054903 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.02102944586984, \"RMSE\": 35.899425045778656, \"R2\": 0.7168073485461736, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.094104 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.86795045847124, \"RMSE\": 37.077313876148, \"R2\": 0.7205745757906983, \"Memory in Mb\": 0.0044412612915039, \"Time in s\": 1.134008 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.431235633428948, \"RMSE\": 26.218144216470428, \"R2\": -1611.3462157506035, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.004147 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.103513545807008, \"RMSE\": 19.087149489688155, \"R2\": -145.46960290724672, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.010783 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.461367198760032, \"RMSE\": 16.23222714650599, \"R2\": -141.81258639462544, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.018889 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear 
Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.44127715126052, \"RMSE\": 14.201674320759096, \"R2\": -108.65615123709478, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.028452 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.797022024035496, \"RMSE\": 12.721240349043525, \"R2\": -54.205539694816935, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.039551 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.691147023027369, \"RMSE\": 11.629122321270428, \"R2\": -38.13701304656029, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.052122 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.831783229321132, \"RMSE\": 10.770680162674168, \"R2\": -33.69279126993988, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.066155 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.153144449790517, \"RMSE\": 10.076741978726949, \"R2\": -25.77937886024605, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.08166 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.653806702754108, \"RMSE\": 9.504457154537077, \"R2\": -19.92363756907144, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.098629 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.222103651464603, \"RMSE\": 9.017588926226493, \"R2\": -17.890910704295415, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.117152 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.8867321432249606, \"RMSE\": 8.600346446062781, \"R2\": -17.535660715889627, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.137154 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.599340946609919, \"RMSE\": 8.235553698215059, \"R2\": -16.124474758948196, \"Memory in Mb\": 0.0050439834594726, \"Time in s\": 0.15861 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3420538385748757, \"RMSE\": 7.912854669167724, \"R2\": -15.021792727856036, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.181525 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.121876705697044, \"RMSE\": 7.625390851691296, \"R2\": -14.577959242562436, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.205897 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9443386382725367, \"RMSE\": 7.3683114276631025, \"R2\": -13.30448388815742, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.231796 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.815755936756583, \"RMSE\": 7.138230019587049, \"R2\": 
-13.089830522149688, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.259173 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6933833627330244, \"RMSE\": 6.928597052658441, \"R2\": -13.027805837336182, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.288012 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6051143953909115, \"RMSE\": 6.739167122054826, \"R2\": -12.380223856378365, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.318313 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.490520088621087, \"RMSE\": 6.560408055638074, \"R2\": -12.118440379947032, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.350072 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.379600080775968, \"RMSE\": 6.3947378153227445, \"R2\": -11.816514349639151, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.383345 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2783211278818065, \"RMSE\": 6.240996339547436, \"R2\": -11.61166202267384, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.418092 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2145782025697205, \"RMSE\": 6.101000859547544, \"R2\": -10.83412932108482, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.454298 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1464112051078263, \"RMSE\": 5.968705291729145, \"R2\": -9.740869666058233, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.491959 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.072454283271577, \"RMSE\": 5.843773480387036, \"R2\": -9.059069500744918, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.5310900000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9987939253418097, \"RMSE\": 5.726023081885353, \"R2\": -8.45516263823347, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.5716870000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9284894530733132, \"RMSE\": 5.614979238753044, \"R2\": -8.107866604370331, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.613828 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8708966196370944, \"RMSE\": 5.510640598064868, \"R2\": -7.740375491210358, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.657455 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824494929332068, \"RMSE\": 5.412971505312132, \"R2\": -7.634241934578528, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.702504 }, { \"step\": 580, \"track\": 
\"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.781951127238344, \"RMSE\": 5.3203622791847724, \"R2\": -7.547628985954546, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.748994 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.749732390051898, \"RMSE\": 5.233421242207161, \"R2\": -7.179064952823026, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.796981 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7052484400139905, \"RMSE\": 5.14921027958835, \"R2\": -6.797272491389374, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.846385 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6664817162996513, \"RMSE\": 5.068963198399273, \"R2\": -6.414896594696412, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.897251 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6214900718153242, \"RMSE\": 4.99171743637858, \"R2\": -6.138924064419633, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 0.949568 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5818233283584242, \"RMSE\": 4.918048415051679, \"R2\": -6.03927170869104, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.003319 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.555259840641907, \"RMSE\": 4.848763195223404, \"R2\": -6.02204294951599, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.058593 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.532054525069809, \"RMSE\": 4.783346185353484, \"R2\": -5.983984772166844, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.1153050000000002 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5146024654094865, \"RMSE\": 4.7215191517857305, \"R2\": -5.800515662606876, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.1734340000000003 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4818289913711118, \"RMSE\": 4.659306491272199, \"R2\": -5.694229888242945, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.2330550000000002 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.452402157072035, \"RMSE\": 4.59956290700914, \"R2\": -5.552882667907455, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.294088 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4293016341621465, \"RMSE\": 4.542852809132106, \"R2\": -5.450103306475059, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.356613 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 
1.4031025761620075, \"RMSE\": 4.487566758646917, \"R2\": -5.363185739412862, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.420603 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.381369306663701, \"RMSE\": 4.43470615373713, \"R2\": -5.271853959037296, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.48601 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3592027324075535, \"RMSE\": 4.3834303912536, \"R2\": -5.11710119981194, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.552867 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3365452172324814, \"RMSE\": 4.3337346544721145, \"R2\": -4.949476248649982, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.621167 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.309932841675593, \"RMSE\": 4.285399956971287, \"R2\": -4.842022075269781, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.690888 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.286195364366444, \"RMSE\": 4.238743982476022, \"R2\": -4.809417914789597, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.762146 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.270754149650798, \"RMSE\": 4.194574036632911, \"R2\": -4.737236102715975, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.834847 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.252007480522268, \"RMSE\": 4.151199175197265, \"R2\": -4.677947706919012, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.908959 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2327553756112288, \"RMSE\": 4.10900051997898, \"R2\": -4.671141158917876, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 1.984552 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression with l1 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2158544056174447, \"RMSE\": 4.0682125513101886, \"R2\": -4.652689174637866, \"Memory in Mb\": 0.0052042007446289, \"Time in s\": 2.061558 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 32.49979765090093, \"RMSE\": 33.085767570527814, \"R2\": -1408.093935143081, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.00221 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.548763427243948, \"RMSE\": 25.711783397814365, \"R2\": -699.1539821884553, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.0060149999999999 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.994606748791693, \"RMSE\": 21.16216382986949, \"R2\": -310.0297747454571, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.010773 }, { 
\"step\": 44, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.434663043069094, \"RMSE\": 18.386175360023177, \"R2\": -265.1519301746234, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.016452 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.685726760044922, \"RMSE\": 18.64479618798502, \"R2\": -66.86112450289035, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.023006 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.3059577367172, \"RMSE\": 17.58876192176611, \"R2\": -25.03287862681572, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.030252 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.844694328458631, \"RMSE\": 16.6807536659431, \"R2\": -14.693178357367543, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.03818 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.154725738202892, \"RMSE\": 15.783555067193374, \"R2\": -11.739056703820452, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.046793 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.383929359740238, \"RMSE\": 14.970988963724652, \"R2\": -8.585892537614809, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.056087 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.96579366260941, \"RMSE\": 14.36848414897767, \"R2\": -5.166041463970149, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.066275 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.69456937849415, \"RMSE\": 13.920059192886765, \"R2\": -3.4570517192093604, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.077161 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.550940690791585, \"RMSE\": 13.540299798742517, \"R2\": -2.4466881997122822, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.088733 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.359343163302276, \"RMSE\": 13.17888693683795, \"R2\": -1.6446630191344274, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.100994 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.050096583806178, \"RMSE\": 12.809003240652473, \"R2\": -1.1232058766616235, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.113939 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.0176805612097, \"RMSE\": 12.690905771048246, \"R2\": -0.6754526017728211, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.127575 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.128420828629457, 
\"RMSE\": 12.67565049026222, \"R2\": -0.3476766887041909, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.141896 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.200055067293626, \"RMSE\": 12.61943948921252, \"R2\": -0.0936676208508318, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.156913 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.131920516304556, \"RMSE\": 12.48608852319409, \"R2\": 0.14832986501041, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.172621 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.262178084838377, \"RMSE\": 12.632807163510387, \"R2\": 0.2813122010719644, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.18901 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.624585089266471, \"RMSE\": 13.14522964439942, \"R2\": 0.3187088278286061, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.206079 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 9.871229105762628, \"RMSE\": 13.33182219595452, \"R2\": 0.4005868940419749, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.223824 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.053594156641497, \"RMSE\": 13.615837484576032, \"R2\": 0.496913232046656, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.242255 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.103586276318884, \"RMSE\": 13.654347763291469, \"R2\": 0.5605873283913356, \"Memory in Mb\": 0.0041532516479492, \"Time in s\": 0.261365 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 10.539757430756955, \"RMSE\": 14.361033144769577, \"R2\": 0.5558923432468688, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.281177 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.000663746720075, \"RMSE\": 15.253690514733572, \"R2\": 0.5591184315358017, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.301675 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.301045102393736, \"RMSE\": 15.716738058687294, \"R2\": 0.5946085911267809, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.32286 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.275670942063872, \"RMSE\": 15.722526759958118, \"R2\": 0.648148884657232, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.344736 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 11.737413962135747, \"RMSE\": 16.425717512690383, \"R2\": 0.6579773485389515, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.367308 }, { \"step\": 319, \"track\": 
\"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 12.62956768598283, \"RMSE\": 18.5239816734196, \"R2\": 0.6052658886851463, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.390571 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.20835169207171, \"RMSE\": 19.400144953397177, \"R2\": 0.6294827674482892, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.41452 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.34580485099439, \"RMSE\": 19.47322215763284, \"R2\": 0.6657152345574865, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.439156 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 13.572829398695534, \"RMSE\": 19.644456145190084, \"R2\": 0.6907528542453616, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.464482 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.286348966120116, \"RMSE\": 20.694687599962585, \"R2\": 0.666738740088638, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.490494 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 14.777113556436731, \"RMSE\": 21.8206517710938, \"R2\": 0.6593962849465681, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.517211 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.215863000720542, \"RMSE\": 22.583610768099227, \"R2\": 0.6772102871974224, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.544615 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 15.33144965807796, \"RMSE\": 22.564695888148663, \"R2\": 0.6995373757169706, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.5727059999999999 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.156138146741625, \"RMSE\": 23.924755047114477, \"R2\": 0.6860306363812331, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.601482 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 16.826141311926307, \"RMSE\": 25.281544830782227, \"R2\": 0.6708975363085852, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6309469999999999 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.40263926327312, \"RMSE\": 26.38004441662919, \"R2\": 0.6815842184892376, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6611069999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 17.533400796677356, \"RMSE\": 26.42712307382207, \"R2\": 0.7030645738539452, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.6919729999999998 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 
18.01931805998843, \"RMSE\": 26.98764790902567, \"R2\": 0.7034989551644695, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7235269999999998 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.02005973582853, \"RMSE\": 28.983219342716676, \"R2\": 0.6787962347144068, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7557739999999998 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 19.888963224686226, \"RMSE\": 30.578078926209333, \"R2\": 0.6843036130043219, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.7887069999999998 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.14556805064173, \"RMSE\": 30.710181129007665, \"R2\": 0.6990197135707891, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8223269999999998 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 20.606054171923702, \"RMSE\": 31.270986299633183, \"R2\": 0.7064351021760091, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8566339999999998 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 21.410220326067677, \"RMSE\": 32.615082621422005, \"R2\": 0.6899384474766328, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.8916409999999998 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.149063292155795, \"RMSE\": 33.66176418126127, \"R2\": 0.6883188968774838, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.9273449999999998 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 22.923596011881333, \"RMSE\": 34.92960124509041, \"R2\": 0.6982661596564212, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 0.9637369999999996 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.042465823580866, \"RMSE\": 34.93124976178739, \"R2\": 0.7106985365247873, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.0008159999999997 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 23.974366627279803, \"RMSE\": 36.47485289150521, \"R2\": 0.6942867450600009, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.0385829999999998 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 24.688352372874245, \"RMSE\": 37.45551228620605, \"R2\": 0.6917248794696187, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.077041 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"ChickWeights\", \"MAE\": 25.52040552278537, \"RMSE\": 38.65530944983144, \"R2\": 0.6962839796503111, \"Memory in Mb\": 0.0042333602905273, \"Time in s\": 1.116183 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.51586240468561, \"RMSE\": 26.237375459551668, \"R2\": 
-1613.712423965852, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.004083 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.404588626352648, \"RMSE\": 19.15618772462805, \"R2\": -146.53108046561562, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.01063 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.231791081689297, \"RMSE\": 16.474815193156783, \"R2\": -146.113106004694, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.018597 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.897294138330498, \"RMSE\": 14.380930849374858, \"R2\": -111.44182781593445, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.027966 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.086918618304638, \"RMSE\": 12.871841233853624, \"R2\": -55.52038254270653, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.038819 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.090525087262731, \"RMSE\": 11.800543733353924, \"R2\": -39.29933105483862, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.051073 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.252895179240796, \"RMSE\": 10.939748534807466, \"R2\": -34.79049149741283, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.064731 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.613172563674658, \"RMSE\": 10.244365728872303, \"R2\": -26.67772385659461, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.079792 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.139122523864994, \"RMSE\": 9.674573881172426, \"R2\": -20.679349421494624, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.096247 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.970287512907828, \"RMSE\": 9.286148634805688, \"R2\": -19.03287536608377, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.114161 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.714509288646119, \"RMSE\": 8.88589717245386, \"R2\": -18.78694495096694, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.133481 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.3785831664508095, \"RMSE\": 8.51169894931726, \"R2\": -17.292125083299812, \"Memory in Mb\": 0.004836082458496, \"Time in s\": 0.154212 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.192432902948977, \"RMSE\": 8.203158141559566, \"R2\": -16.21895925323482, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.176332 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Linear Regression with 
l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.032216676138626, \"RMSE\": 7.92689648751503, \"R2\": -15.834209174335763, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.199865 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.888249411356283, \"RMSE\": 7.680244711632193, \"R2\": -14.54126486396457, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.224857 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.851910342336546, \"RMSE\": 7.48583024126048, \"R2\": -14.495465984230798, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.251273 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.7628843932429423, \"RMSE\": 7.2990742240635536, \"R2\": -14.5680671641431, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.279095 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.741498329747232, \"RMSE\": 7.147194018854174, \"R2\": -14.04949995366026, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.308317 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.6331417160345065, \"RMSE\": 6.972318267910069, \"R2\": -13.817498979914417, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.3389480000000001 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.505723677240456, \"RMSE\": 6.801269751447825, \"R2\": -13.497878046830476, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.371039 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.377789937544477, \"RMSE\": 6.6406714986639335, \"R2\": -13.278693194916952, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4045350000000001 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.285565585155429, \"RMSE\": 6.496417025168413, \"R2\": -12.417819031510929, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4394350000000001 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.204438206990859, \"RMSE\": 6.363151879091182, \"R2\": -11.207416254643826, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.4757300000000001 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1147239220792944, \"RMSE\": 6.234124280033156, \"R2\": -10.44779843680249, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.5134280000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.0674610457420317, \"RMSE\": 6.126558214637352, \"R2\": -9.824203383261038, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.5525380000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.990407253638538, 
\"RMSE\": 6.01302433311803, \"R2\": -9.444947809169491, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.593122 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9353658306947947, \"RMSE\": 5.909270916056388, \"R2\": -9.05064000964308, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.635128 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.8633890512526734, \"RMSE\": 5.806679023039649, \"R2\": -8.935926541145857, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.6785490000000001 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.807495338487497, \"RMSE\": 5.711109374041071, \"R2\": -8.849273711490637, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.723357 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.7371272959787114, \"RMSE\": 5.616984296672238, \"R2\": -8.421904346026553, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.769575 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695279458158905, \"RMSE\": 5.533794104458184, \"R2\": -8.005492094038127, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.817237 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.663427445960627, \"RMSE\": 5.457208108806078, \"R2\": -7.594247452705627, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.866263 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.616152871221604, \"RMSE\": 5.378587544649896, \"R2\": -7.28837246231315, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.916653 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5798661012588893, \"RMSE\": 5.305972052989116, \"R2\": -7.193548818831784, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 0.968481 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.537320459903927, \"RMSE\": 5.236098928386573, \"R2\": -7.188742583674767, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.021719 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.487562037475977, \"RMSE\": 5.165214649048708, \"R2\": -7.14359946989749, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.076348 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4359344014471898, \"RMSE\": 5.096521995605296, \"R2\": -6.923665614900413, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.1323770000000002 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.407655610747697, \"RMSE\": 5.035258504842907, \"R2\": -6.818106944929671, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.1897670000000002 }, { 
\"step\": 780, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.359216458068109, \"RMSE\": 4.971257259303496, \"R2\": -6.65476288050581, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.2485520000000003 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.316070029510264, \"RMSE\": 4.9101929612142525, \"R2\": -6.535402552442101, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.3087800000000005 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2777366252623445, \"RMSE\": 4.852446886619337, \"R2\": -6.440024000118236, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.3703650000000005 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.243368105900121, \"RMSE\": 4.7970088928814505, \"R2\": -6.33849981001193, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.4333480000000005 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.208161873346368, \"RMSE\": 4.742699334581194, \"R2\": -6.1609167229990645, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.4977110000000002 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.173524312605847, \"RMSE\": 4.690026300839657, \"R2\": -5.967944096931786, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.5634250000000005 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.143113317062195, \"RMSE\": 4.639957881226245, \"R2\": -5.848706397355668, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.6305320000000003 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.107662636673197, \"RMSE\": 4.590154711256589, \"R2\": -5.812600067991807, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.6990770000000004 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.084055644614135, \"RMSE\": 4.5438188766398575, \"R2\": -5.732386133187966, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.7689810000000004 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0573225686306618, \"RMSE\": 4.498049663013517, \"R2\": -5.666421016566231, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.8402960000000004 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.029372876222096, \"RMSE\": 4.453478872426294, \"R2\": -5.661880798559547, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.9129870000000004 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Linear Regression with l2 regularization\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9991816433402003, \"RMSE\": 4.409973662813209, \"R2\": -5.642320489885111, \"Memory in Mb\": 0.0049962997436523, \"Time in s\": 1.9870360000000005 }, { \"step\": 11, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 29.34636433128918, \"RMSE\": 30.877867366178624, \"R2\": -1226.303892160441, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.002755 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 16.78579478575624, \"RMSE\": 22.219906445728544, \"R2\": -521.8939460594183, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.007689 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 12.764406226748012, \"RMSE\": 18.43476392899385, \"R2\": -235.02444355689545, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0139269999999999 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.51475037374614, \"RMSE\": 16.140786164803156, \"R2\": -204.11441945614396, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.021043 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.118868381910096, \"RMSE\": 17.807152193193623, \"R2\": -60.900579144648304, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0290209999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.068652098820438, \"RMSE\": 16.444921319285292, \"R2\": -21.75701273895947, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.037852 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.238561614797426, \"RMSE\": 15.414518181428049, \"R2\": -12.40107017683018, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0475289999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.397438800335843, \"RMSE\": 14.475112340817043, \"R2\": -9.714489831884093, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0580659999999999 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.833048644024143, \"RMSE\": 13.715858800506394, \"R2\": -7.045954767890709, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.069447 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.655503029296072, \"RMSE\": 13.224205829308971, \"R2\": -4.223044606682061, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0819039999999999 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.616633860178707, \"RMSE\": 12.86945366769748, \"R2\": -2.809655727072758, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.0952199999999999 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.226514673025618, \"RMSE\": 12.359535435581538, \"R2\": -1.871770505407636, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1094009999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.028644030968596, \"RMSE\": 
11.955222747545204, \"R2\": -1.1763474084194208, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1244559999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 6.986848640780892, \"RMSE\": 11.69340351574852, \"R2\": -0.7694704308339801, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1403709999999999 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.055944609610511, \"RMSE\": 11.647997480703344, \"R2\": -0.411397823157807, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1571089999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.165300854035116, \"RMSE\": 11.69334605191042, \"R2\": -0.146892755517725, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1746439999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.086580322778478, \"RMSE\": 11.479257625599036, \"R2\": 0.0950328202686906, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.1929879999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.0079421524022685, \"RMSE\": 11.279628541389028, \"R2\": 0.3049625679809165, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.2121359999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.136281210496879, \"RMSE\": 11.437970564343969, \"R2\": 0.4108328996032877, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.2320939999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.591733813835971, \"RMSE\": 12.23821647621677, \"R2\": 0.409482641430799, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.252837 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.63642424272378, \"RMSE\": 12.197368986664095, \"R2\": 0.4982590675647103, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.274371 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.775639220351206, \"RMSE\": 12.334191292520584, \"R2\": 0.5871659255406134, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.296703 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 7.787415018822619, \"RMSE\": 12.26821713761009, \"R2\": 0.6452735558950271, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.31983 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.387229506824328, \"RMSE\": 13.12794439290609, \"R2\": 0.6288834273883384, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.343762 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 8.893165599265544, \"RMSE\": 14.22060275652947, \"R2\": 0.6168153604937621, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.368493 }, { \"step\": 286, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.145334455404228, \"RMSE\": 14.488680433063887, \"R2\": 0.6554856011642076, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.394025 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.138984650870034, \"RMSE\": 14.40937364029996, \"R2\": 0.7044680409221951, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.420371 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 9.653721426872565, \"RMSE\": 15.186897141025446, \"R2\": 0.7076222812215553, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.447515 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 10.688547371510406, \"RMSE\": 17.568442046558065, \"R2\": 0.6449394075517852, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.475457 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.136888804773204, \"RMSE\": 18.130051576409294, \"R2\": 0.6764089203981734, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5041950000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.389709627918275, \"RMSE\": 18.31814497212097, \"R2\": 0.7041960757214223, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.53373 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 11.506556624125665, \"RMSE\": 18.357319157972537, \"R2\": 0.7299499902857889, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.56408 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 12.507799925411486, \"RMSE\": 19.94157204039453, \"R2\": 0.6905532928077196, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5952430000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.00643913732256, \"RMSE\": 20.915910426515573, \"R2\": 0.6870553798396234, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6272240000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.44126440090726, \"RMSE\": 21.59107831138786, \"R2\": 0.7049595304644938, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6600050000000001 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 13.452600873244052, \"RMSE\": 21.4799043653453, \"R2\": 0.7277322681469997, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6935880000000001 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 14.30954893105116, \"RMSE\": 22.795153034451378, \"R2\": 0.7149787127692513, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7279810000000001 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.058465273046648, \"RMSE\": 
24.12824896117789, \"R2\": 0.7002387244037227, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7631730000000001 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.61316127520364, \"RMSE\": 25.00709438423813, \"R2\": 0.7138656442877266, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.799166 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 15.833192306896644, \"RMSE\": 25.159785721055627, \"R2\": 0.7308613212218023, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.8359580000000001 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 16.36714766376461, \"RMSE\": 25.770608582556893, \"R2\": 0.729638089688956, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.87355 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 17.39003054773241, \"RMSE\": 27.77320733878432, \"R2\": 0.7050560764454472, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.911945 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 18.288428268963266, \"RMSE\": 29.396681708172505, \"R2\": 0.7082265052542642, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.951137 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 18.65703381705754, \"RMSE\": 29.64739580601693, \"R2\": 0.7194912595024185, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.991127 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 19.170202167844984, \"RMSE\": 30.22319045197901, \"R2\": 0.7257784495510498, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.031919 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 20.00164363454025, \"RMSE\": 31.52072905752619, \"R2\": 0.7103967314785831, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.073505 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 20.7279777099298, \"RMSE\": 32.51187613530653, \"R2\": 0.7092492864364832, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.115928 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 21.56683033620196, \"RMSE\": 33.84128388534863, \"R2\": 0.7167757554755416, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.159144 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 21.76383497583081, \"RMSE\": 33.92033428284125, \"R2\": 0.727201090633804, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.203152 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 22.69913149057359, \"RMSE\": 35.42417858076478, \"R2\": 0.7116454901231712, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.247925 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 23.377380034069706, \"RMSE\": 36.32705612571005, \"R2\": 0.7100204288247138, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.293462 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"ChickWeights\", \"MAE\": 24.23392939046311, \"RMSE\": 37.557568322570944, \"R2\": 0.7132890208408607, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.339772 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.88769754963664, \"RMSE\": 24.32970381980572, \"R2\": -1387.4429851591376, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.004188 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.670966252574736, \"RMSE\": 18.65150015508889, \"R2\": -138.85979610511808, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.011118 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.92834996141842, \"RMSE\": 15.667746469337834, \"R2\": -132.052589810652, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.019635 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.678171261463673, \"RMSE\": 14.124417656525663, \"R2\": -107.46634425227307, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0297069999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.352313294103828, \"RMSE\": 13.38210191485773, \"R2\": -60.090321390572015, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.041437 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.620142702346234, \"RMSE\": 12.447697479286916, \"R2\": -43.84064485890953, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.05473 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.069393725677067, \"RMSE\": 11.747669450243144, \"R2\": -40.272086023690846, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.069574 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.915997483818508, \"RMSE\": 11.323556682786094, \"R2\": -32.81628847194888, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.085987 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.901057285338727, \"RMSE\": 11.048721851620664, \"R2\": -27.27526182903414, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.103947 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.54356570823564, \"RMSE\": 10.615481381947994, \"R2\": -25.17890116869019, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.1235479999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.241521693861677, \"RMSE\": 10.231227443733497, \"R2\": -25.232015334984823, \"Memory in Mb\": 0.0043020248413085, 
\"Time in s\": 0.1447119999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.008192421558015, \"RMSE\": 9.935910620042463, \"R2\": -23.925679436807183, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.1674199999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.801990869552961, \"RMSE\": 9.666795781283112, \"R2\": -22.91166502123009, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.191681 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.624313646675292, \"RMSE\": 9.422664085622769, \"R2\": -22.786676328633025, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.217492 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.459069309379183, \"RMSE\": 9.212027904845302, \"R2\": -21.358709628275356, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.244937 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.271769455500056, \"RMSE\": 8.978276576497144, \"R2\": -21.290029949269996, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.2739479999999999 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.106653016686588, \"RMSE\": 8.760106381933458, \"R2\": -21.42424847489592, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.304509 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.987363514041657, \"RMSE\": 8.573066448421043, \"R2\": -20.653260781415117, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.336642 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.8616356212919145, \"RMSE\": 8.393806584529749, \"R2\": -20.475259224624704, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.370328 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.709779552257037, \"RMSE\": 8.216191215364908, \"R2\": -20.15755692495403, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.4056389999999999 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.611842526965273, \"RMSE\": 8.072589482864162, \"R2\": -20.100376445858966, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.442509 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.621266668644241, \"RMSE\": 8.061169250733263, \"R2\": -19.659995268253144, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.480923 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.589374981160082, \"RMSE\": 7.977922595195435, \"R2\": -18.1892858176961, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.520896 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", 
\"MAE\": 5.601949193869223, \"RMSE\": 7.939757215258646, \"R2\": -17.568871361729258, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.5624170000000001 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.5323381947357735, \"RMSE\": 7.827118471157092, \"R2\": -16.667155417363553, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6055100000000001 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.482642323931563, \"RMSE\": 7.735647602994712, \"R2\": -16.286764124140948, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6502260000000001 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.40625682094428, \"RMSE\": 7.63238246035087, \"R2\": -15.7666448554263, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.696447 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.339029679572209, \"RMSE\": 7.532223621796607, \"R2\": -15.718570546376949, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.744222 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.278136676956568, \"RMSE\": 7.444054065699633, \"R2\": -15.733326658615429, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7935180000000001 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.267448053107655, \"RMSE\": 7.406286860836784, \"R2\": -15.380732931295814, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.8443630000000001 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.236749685364014, \"RMSE\": 7.349650271122191, \"R2\": -14.88527744664384, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.8968030000000001 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.212060707085285, \"RMSE\": 7.303465599867649, \"R2\": -14.393054201831111, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.95074 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.169763730821092, \"RMSE\": 7.239859498572405, \"R2\": -14.017341318649356, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.0062160000000002 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.126488999027155, \"RMSE\": 7.167741867944588, \"R2\": -13.952260083042017, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.063227 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.093088048073085, \"RMSE\": 7.110044108134988, \"R2\": -14.098928285386927, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.121789 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.090740367293551, \"RMSE\": 7.074302078930798, \"R2\": -14.275901824522656, \"Memory in Mb\": 0.0044355392456054, 
\"Time in s\": 1.181923 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.094807885255693, \"RMSE\": 7.045283722404726, \"R2\": -14.141723406350016, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.243554 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.043787658869922, \"RMSE\": 6.975646542541488, \"R2\": -14.00468895273832, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.306712 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.064043270517397, \"RMSE\": 6.970645877797938, \"R2\": -14.050305184338798, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.3714110000000002 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.100755566375936, \"RMSE\": 7.010389089224324, \"R2\": -14.360083784619212, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.4376520000000002 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.088064827359337, \"RMSE\": 6.979627415872227, \"R2\": -14.392786134873418, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.505445 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.068856460674791, \"RMSE\": 6.93736545783203, \"R2\": -14.348127111309871, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.574755 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.054699520351681, \"RMSE\": 6.900248160625591, \"R2\": -14.158172906563095, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.645553 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.051365258335469, \"RMSE\": 6.881479694679562, \"R2\": -14.000915574481752, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.717914 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.025346349320704, \"RMSE\": 6.842894221251935, \"R2\": -13.895672840614656, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.791764 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.9962011230931616, \"RMSE\": 6.800765764747934, \"R2\": -13.95456831471299, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.867216 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.964612353038952, \"RMSE\": 6.764345134580912, \"R2\": -13.920332812605436, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.944198 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.9296416909632255, \"RMSE\": 6.717192667284049, \"R2\": -13.866880687662514, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.02269 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 
4.910978624057615, \"RMSE\": 6.682715130965835, \"R2\": -14.000438748691009, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.102738 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 1\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.906394390750375, \"RMSE\": 6.665596501187553, \"R2\": -14.174901311798424, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.184247 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 40.361343182089286, \"RMSE\": 50.93510711941157, \"R2\": -3338.580868182736, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.003338 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 33.77268754890631, \"RMSE\": 41.67984599422324, \"R2\": -1838.845575618055, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.00919 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 32.39258875507137, \"RMSE\": 38.96806999674433, \"R2\": -1053.6287703611629, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.01639 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 30.94881031854868, \"RMSE\": 36.76152485506615, \"R2\": -1062.9809670274265, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.024921 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 33.955857035779495, \"RMSE\": 41.369655851763525, \"R2\": -333.094700988151, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.034827 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.010493836145145, \"RMSE\": 40.92418807176112, \"R2\": -139.93270784533922, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.046059 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.064338631511326, \"RMSE\": 40.5595538563462, \"R2\": -91.78246602216656, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.058629 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.363478110253816, \"RMSE\": 40.47408671194747, \"R2\": -82.76869054449229, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.072516 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 34.108671766629826, \"RMSE\": 39.953033914579606, \"R2\": -67.2701887367714, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.087732 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 35.5272246808861, \"RMSE\": 41.29414928968925, \"R2\": -49.9285816695273, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.104612 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 35.85052464333277, \"RMSE\": 41.48724749727828, \"R2\": -38.59084342971371, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.122819 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 36.414238795248615, \"RMSE\": 42.10793271457587, \"R2\": -32.332913655998325, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.142364 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.27092918309872, \"RMSE\": 43.0841670883325, \"R2\": -27.26495387953688, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.163229 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.68834060456834, \"RMSE\": 43.351809236536255, \"R2\": -23.3206899065102, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.18538 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 37.63751931524077, \"RMSE\": 43.32469674855668, \"R2\": -18.52621065458175, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.208847 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 38.77878602757167, \"RMSE\": 44.74953718825682, \"R2\": -15.796635622291838, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.233634 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 39.47954967522975, \"RMSE\": 45.39032172466195, \"R2\": -13.149195443978996, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.259707 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 40.91548075261064, \"RMSE\": 46.96428169788168, \"R2\": -11.049082215962626, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.287109 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 41.53723737741547, \"RMSE\": 47.716579905431935, \"R2\": -9.253665706385002, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.31584 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 41.99935380922626, \"RMSE\": 48.63121942098776, \"R2\": -8.324525171754294, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.345843 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 42.76794868726854, \"RMSE\": 49.65880643089243, \"R2\": -7.316484115890946, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.377141 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 44.368299619960766, \"RMSE\": 51.88245138837915, \"R2\": -6.304578357179455, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.409767 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 45.426835880148886, \"RMSE\": 53.192808855117775, \"R2\": -5.668628208432534, \"Memory in Mb\": 0.0034055709838867, \"Time in s\": 0.443674 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 46.93737170570451, \"RMSE\": 55.9021194350506, \"R2\": -5.729354993233594, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.478844 }, { \"step\": 
275, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 48.507353400069945, \"RMSE\": 58.8434937271261, \"R2\": -5.560984055006233, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.515365 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 49.83788736782816, \"RMSE\": 60.74084767697289, \"R2\": -5.054961799673954, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5532039999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 51.62361105051165, \"RMSE\": 63.27455882125991, \"R2\": -4.698656745997159, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.5923289999999999 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 53.1847087657747, \"RMSE\": 65.32139627595005, \"R2\": -4.409001340116382, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6328059999999999 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 55.18568520771326, \"RMSE\": 70.36874449488667, \"R2\": -4.696335728503607, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.6746009999999999 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 56.89081675494835, \"RMSE\": 72.51519786174504, \"R2\": -4.176742159590063, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7176629999999999 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 58.741890997125225, \"RMSE\": 75.13624143509449, \"R2\": -3.976681879584251, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.7620199999999999 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 60.2425190521958, \"RMSE\": 76.8261590755005, \"R2\": -3.729812475420129, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.8076819999999999 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 62.22918871806441, \"RMSE\": 80.45649530418282, \"R2\": -4.037201263859445, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.854609 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 64.15805403621584, \"RMSE\": 84.29062360683722, \"R2\": -4.082442538013045, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.902845 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 65.35464556519459, \"RMSE\": 85.58979700811152, \"R2\": -3.6363575203718743, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 0.952397 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 67.08652533638559, \"RMSE\": 87.70251677464411, \"R2\": -3.538952097004609, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.00322 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 68.34565949899115, 
\"RMSE\": 89.3540667816312, \"R2\": -3.3794635851122994, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.055319 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 70.95506872236689, \"RMSE\": 94.70758550832085, \"R2\": -3.618420231648276, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.108722 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 72.57879663609631, \"RMSE\": 96.822819417077, \"R2\": -3.2894241164712765, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.163438 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 74.40667828471298, \"RMSE\": 99.12463362784464, \"R2\": -3.1775863007897582, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.219419 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 75.93202948942836, \"RMSE\": 101.47969042740628, \"R2\": -3.192319994595937, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.276691 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 78.59712471455953, \"RMSE\": 106.68213481552291, \"R2\": -3.3518185198786687, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.335258 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 81.70573920737151, \"RMSE\": 112.24508574603004, \"R2\": -3.253866822086197, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.395089 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 82.72251416230137, \"RMSE\": 113.16810597159808, \"R2\": -3.0871576545332315, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.456215 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 84.44211178292737, \"RMSE\": 115.99711612480068, \"R2\": -3.039385949207989, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.518639 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 86.55149997089892, \"RMSE\": 119.94151559804617, \"R2\": -3.193242857597108, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.582321 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 87.81070722823603, \"RMSE\": 121.26627191062052, \"R2\": -3.0449837234699952, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.64731 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 90.44199567451936, \"RMSE\": 126.238673662019, \"R2\": -2.9411377147279807, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.713605 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 91.59067370330054, \"RMSE\": 127.24192286613216, \"R2\": -2.8386881284150203, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.781138 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Passive-Aggressive 
Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 93.78609063040562, \"RMSE\": 130.999704877259, \"R2\": -2.943372518937519, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.84997 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 96.49865979675556, \"RMSE\": 135.93192637304293, \"R2\": -3.060223463970532, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.920139 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"ChickWeights\", \"MAE\": 99.56805853722273, \"RMSE\": 141.40025114882988, \"R2\": -3.0639630782357843, \"Memory in Mb\": 0.0034589767456054, \"Time in s\": 1.991547 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 38.256966119949794, \"RMSE\": 53.46437671117289, \"R2\": -6703.762875072117, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.004258 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 36.86518407094958, \"RMSE\": 46.91757933405302, \"R2\": -883.9863015306486, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0112559999999999 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 34.81538726709292, \"RMSE\": 43.77024226536395, \"R2\": -1037.408328049847, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.019887 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.82099099523828, \"RMSE\": 40.95636211937148, \"R2\": -911.003802678922, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.0300789999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.41507697560151, \"RMSE\": 39.67328525303196, \"R2\": -535.9329715822871, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.041942 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 32.126533810299065, \"RMSE\": 38.74392424963554, \"R2\": -433.4111998192701, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.055368 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.578696449953306, \"RMSE\": 37.792227545537656, \"R2\": -426.12792465890055, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.070349 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.74326128631808, \"RMSE\": 36.75147996394125, \"R2\": -355.2131008927076, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.086898 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.01457804514465, \"RMSE\": 35.792387341570375, \"R2\": -295.7316609698654, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.104998 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.69358993814741, \"RMSE\": 35.287452593511624, \"R2\": -288.27615949420783, \"Memory in Mb\": 0.0043020248413085, \"Time in 
s\": 0.124741 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.653103799764548, \"RMSE\": 34.981694493871686, \"R2\": -305.6605175079266, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.146064 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.438535707776744, \"RMSE\": 34.575414655319626, \"R2\": -300.8328104705327, \"Memory in Mb\": 0.0043020248413085, \"Time in s\": 0.168934 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.47891951475184, \"RMSE\": 34.56744094622709, \"R2\": -304.7589578451177, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.193372 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.26309099262944, \"RMSE\": 34.21824031120625, \"R2\": -312.6907330516863, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.2193709999999999 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.22587791049338, \"RMSE\": 34.04470159277256, \"R2\": -304.37628668920115, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.246997 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.39018713755992, \"RMSE\": 34.027497016659055, \"R2\": -319.1729973057711, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.276184 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.42027037377748, \"RMSE\": 33.93053373578466, \"R2\": -335.4190027425263, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.306921 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.664307062669348, \"RMSE\": 34.045635299267715, \"R2\": -340.48671423689984, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.339233 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.83498723746747, \"RMSE\": 34.11808971579651, \"R2\": -353.80514851974, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.373101 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.001082895685222, \"RMSE\": 34.17628998525774, \"R2\": -365.0785427385626, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.408596 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.14554243966483, \"RMSE\": 34.20268208319664, \"R2\": -377.7780423674053, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.445652 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.318997649838, \"RMSE\": 34.29163258825692, \"R2\": -372.8612574130533, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.484259 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.44816166194883, \"RMSE\": 
34.34474274542465, \"R2\": -354.6310813895727, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.524426 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.547707035634, \"RMSE\": 34.3460988289287, \"R2\": -346.476863625336, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.5661419999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.677713278160866, \"RMSE\": 34.40102042286509, \"R2\": -340.27577766039445, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6094289999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.774567489967996, \"RMSE\": 34.41616981796676, \"R2\": -341.1727526506248, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.6543389999999999 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.8774276068303, \"RMSE\": 34.45885995564912, \"R2\": -340.7651110108783, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7007559999999999 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.809160821418956, \"RMSE\": 34.35467967149731, \"R2\": -346.7959646189386, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.7487349999999999 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.794626934327955, \"RMSE\": 34.31319945532044, \"R2\": -354.5377185045104, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.798249 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.95165244409411, \"RMSE\": 34.41180694760753, \"R2\": -352.62847406429205, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.849322 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.05924245979316, \"RMSE\": 34.47559777081781, \"R2\": -348.53049085374005, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.901986 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.17486346702509, \"RMSE\": 34.55495775749415, \"R2\": -343.578007451748, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 0.956154 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.18138785276698, \"RMSE\": 34.53217565702543, \"R2\": -340.6493968041527, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.011873 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.262098564397714, \"RMSE\": 34.57860649898553, \"R2\": -346.98225985471817, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.06915 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.21351117376332, \"RMSE\": 34.56642775102796, \"R2\": -355.8704038677769, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.127995 }, { \"step\": 720, \"track\": 
\"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.187412408778744, \"RMSE\": 34.52480046664081, \"R2\": -362.8329367085535, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.188423 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.11951946368673, \"RMSE\": 34.44075758421072, \"R2\": -360.84595808308967, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.250358 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.06286774404838, \"RMSE\": 34.392033812715184, \"R2\": -363.7319274626152, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.313829 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.122129081397333, \"RMSE\": 34.4147077867913, \"R2\": -365.8490836115921, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.378848 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.106214253085103, \"RMSE\": 34.39160753515401, \"R2\": -368.6700713862459, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.445416 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.221689813214, \"RMSE\": 34.48240321448139, \"R2\": -374.7057202811433, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.513551 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.279425902447173, \"RMSE\": 34.50697415799086, \"R2\": -378.7344488771949, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.583215 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.342876956175303, \"RMSE\": 34.54160049479585, \"R2\": -378.8414458535583, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.65438 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.26946109805924, \"RMSE\": 34.463329541799936, \"R2\": -375.2431199026635, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.727121 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.252963502360984, \"RMSE\": 34.437602544585566, \"R2\": -376.2648040183398, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.801358 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.17140034618248, \"RMSE\": 34.35879469425013, \"R2\": -380.710483609337, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.877212 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.028682196255257, \"RMSE\": 34.23221645186374, \"R2\": -381.1175926718204, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 1.954607 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.091829199349828, \"RMSE\": 34.349474902309865, \"R2\": -387.76257537598985, \"Memory in 
Mb\": 0.0044355392456054, \"Time in s\": 2.033516 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.00094350988743, \"RMSE\": 34.303507421677764, \"R2\": -394.25294990859175, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.113994 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Passive-Aggressive Regressor, mode 2\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.128814077399102, \"RMSE\": 34.425663215951964, \"R2\": -403.7738674316034, \"Memory in Mb\": 0.0044355392456054, \"Time in s\": 2.195941 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.6439393939393945, \"RMSE\": 12.708027567111456, \"R2\": -206.8805289598106, \"Memory in Mb\": 0.007791519165039, \"Time in s\": 0.002301 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7674242424242426, \"RMSE\": 9.021574170013263, \"R2\": -85.19732920009746, \"Memory in Mb\": 0.0116405487060546, \"Time in s\": 0.007035 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.366161616161617, \"RMSE\": 7.437810062008745, \"R2\": -37.42129411139464, \"Memory in Mb\": 0.0161724090576171, \"Time in s\": 0.013843 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.015530303030304, \"RMSE\": 6.463663489621867, \"R2\": -31.893061768560024, \"Memory in Mb\": 0.0202007293701171, \"Time in s\": 0.022662 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.2124242424242424, \"RMSE\": 6.080421054665558, \"R2\": -6.217272109648366, \"Memory in Mb\": 0.0243968963623046, \"Time in s\": 0.033503 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.280050505050505, \"RMSE\": 5.694858940322259, \"R2\": -1.7290883479828647, \"Memory in Mb\": 0.0287837982177734, \"Time in s\": 0.046733 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.61926406926407, \"RMSE\": 5.707950266794224, \"R2\": -0.8375532519268223, \"Memory in Mb\": 0.0331974029541015, \"Time in s\": 0.062616 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.530492424242425, \"RMSE\": 5.412982721609634, \"R2\": -0.4983072905775765, \"Memory in Mb\": 0.0375041961669921, \"Time in s\": 0.081466 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.4755892255892267, \"RMSE\": 5.17010990945742, \"R2\": -0.1432234574096695, \"Memory in Mb\": 0.0422878265380859, \"Time in s\": 0.103593 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7716666666666683, \"RMSE\": 5.296236752390676, \"R2\": 0.1622405877971293, \"Memory in Mb\": 0.0433177947998046, \"Time in s\": 0.129424 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.180853994490359, \"RMSE\": 5.621206607854847, \"R2\": 0.2731837882445769, \"Memory in Mb\": 0.0438785552978515, \"Time in s\": 0.158775 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"k-Nearest 
Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.3642676767676765, \"RMSE\": 5.706770043255583, \"R2\": 0.3877536814355664, \"Memory in Mb\": 0.0436267852783203, \"Time in s\": 0.191664 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.646736596736596, \"RMSE\": 5.919243012407738, \"R2\": 0.4664867393310171, \"Memory in Mb\": 0.0439319610595703, \"Time in s\": 0.228099 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 3.7550865800865783, \"RMSE\": 5.97572666401829, \"R2\": 0.537892640768072, \"Memory in Mb\": 0.0440692901611328, \"Time in s\": 0.268063 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.093838383838381, \"RMSE\": 6.488494998076776, \"R2\": 0.562039588096868, \"Memory in Mb\": 0.0446529388427734, \"Time in s\": 0.311593 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.458428030303029, \"RMSE\": 6.947478945595657, \"R2\": 0.5951448357515823, \"Memory in Mb\": 0.0446796417236328, \"Time in s\": 0.358632 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 4.792959001782529, \"RMSE\": 7.272258331212408, \"R2\": 0.6368016898131145, \"Memory in Mb\": 0.0447597503662109, \"Time in s\": 0.40948 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 5.229713804713803, \"RMSE\": 7.766788141562423, \"R2\": 0.6704650236153215, \"Memory in Mb\": 0.0442829132080078, \"Time in s\": 0.46388 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 5.61188197767145, \"RMSE\": 8.429860803311705, \"R2\": 0.6799768871245477, \"Memory in Mb\": 0.0443363189697265, \"Time in s\": 0.521779 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 6.048560606060604, \"RMSE\": 9.536044923225656, \"R2\": 0.6414638231876792, \"Memory in Mb\": 0.0443096160888671, \"Time in s\": 0.583178 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 6.582178932178929, \"RMSE\": 10.20324912411692, \"R2\": 0.648905367768132, \"Memory in Mb\": 0.0448932647705078, \"Time in s\": 0.6480619999999999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.071418732782365, \"RMSE\": 10.928542055135823, \"R2\": 0.6759002976153703, \"Memory in Mb\": 0.0449466705322265, \"Time in s\": 0.7164429999999999 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.477799736495386, \"RMSE\": 11.323352624926212, \"R2\": 0.6978095597045382, \"Memory in Mb\": 0.045053482055664, \"Time in s\": 0.7883119999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 7.970770202020199, \"RMSE\": 12.28335187867794, \"R2\": 0.6750992767833781, \"Memory in Mb\": 0.0446300506591796, \"Time in s\": 0.8636689999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 8.55812121212121, \"RMSE\": 13.382565810664548, \"R2\": 
0.6606476529151027, \"Memory in Mb\": 0.0446834564208984, \"Time in s\": 0.942555 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.054137529137526, \"RMSE\": 14.013384412631826, \"R2\": 0.6777181990167639, \"Memory in Mb\": 0.0448436737060546, \"Time in s\": 1.024953 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.468967452300786, \"RMSE\": 14.435360812541292, \"R2\": 0.7034011013652389, \"Memory in Mb\": 0.0454006195068359, \"Time in s\": 1.110877 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 9.90871212121212, \"RMSE\": 15.173853281638724, \"R2\": 0.7081243055691319, \"Memory in Mb\": 0.0454273223876953, \"Time in s\": 1.200187 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 10.713740856844304, \"RMSE\": 17.013635837866804, \"R2\": 0.6670107307192514, \"Memory in Mb\": 0.0455341339111328, \"Time in s\": 1.2928449999999998 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 11.460252525252525, \"RMSE\": 18.125243873896306, \"R2\": 0.6765805165314649, \"Memory in Mb\": 0.045083999633789, \"Time in s\": 1.388826 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 11.901710654936462, \"RMSE\": 18.5766916053512, \"R2\": 0.6957870549438744, \"Memory in Mb\": 0.0451908111572265, \"Time in s\": 1.488176 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 12.310464015151512, \"RMSE\": 18.922178666477887, \"R2\": 0.7130752857476492, \"Memory in Mb\": 0.0451641082763671, \"Time in s\": 1.591164 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 12.780394857667584, \"RMSE\": 19.823234941774256, \"R2\": 0.694215027528111, \"Memory in Mb\": 0.0456142425537109, \"Time in s\": 1.697605 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 13.344073083778964, \"RMSE\": 20.889730456192645, \"R2\": 0.6878383009059359, \"Memory in Mb\": 0.0456142425537109, \"Time in s\": 1.807438 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 13.830865800865798, \"RMSE\": 21.557316750546796, \"R2\": 0.7058815074667231, \"Memory in Mb\": 0.045694351196289, \"Time in s\": 1.920665 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 14.08051346801346, \"RMSE\": 21.615344438143325, \"R2\": 0.7242879119502419, \"Memory in Mb\": 0.0452175140380859, \"Time in s\": 2.037277 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 14.665069615069608, \"RMSE\": 22.79756192033108, \"R2\": 0.714918470135155, \"Memory in Mb\": 0.0452442169189453, \"Time in s\": 2.157231 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 15.362400318979262, \"RMSE\": 24.076729101709564, \"R2\": 0.7015174886253861, \"Memory in Mb\": 0.0457477569580078, \"Time in s\": 2.280525 }, { \"step\": 429, \"track\": \"Regression\", 
\"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 15.914413364413358, \"RMSE\": 24.924104546372128, \"R2\": 0.7157616535295273, \"Memory in Mb\": 0.0458278656005859, \"Time in s\": 2.407192 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 16.21655303030303, \"RMSE\": 25.17906749713446, \"R2\": 0.730448642009748, \"Memory in Mb\": 0.0458545684814453, \"Time in s\": 2.537243 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 16.571359940872135, \"RMSE\": 25.529131814454708, \"R2\": 0.7346810631079229, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 2.670662 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 17.517063492063492, \"RMSE\": 27.45837911410348, \"R2\": 0.7117049574257082, \"Memory in Mb\": 0.0453777313232421, \"Time in s\": 2.807433 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 18.23357998590557, \"RMSE\": 28.586680380220997, \"R2\": 0.7240841374900429, \"Memory in Mb\": 0.0453510284423828, \"Time in s\": 2.947633 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 18.61876721763086, \"RMSE\": 29.038858036362505, \"R2\": 0.7308884346272555, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 3.0911699999999995 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 19.16138047138048, \"RMSE\": 29.754410032566323, \"R2\": 0.7342191699916846, \"Memory in Mb\": 0.0458812713623046, \"Time in s\": 3.2380789999999995 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 19.69344532279316, \"RMSE\": 30.658970587616192, \"R2\": 0.7260154404176653, \"Memory in Mb\": 0.0458545684814453, \"Time in s\": 3.388348 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 20.317762733720187, \"RMSE\": 31.53258587823862, \"R2\": 0.7265009007981393, \"Memory in Mb\": 0.0453777313232421, \"Time in s\": 3.541995 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.03841540404041, \"RMSE\": 32.63466371480821, \"R2\": 0.7366125677104822, \"Memory in Mb\": 0.0454311370849609, \"Time in s\": 3.699025 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.282900432900444, \"RMSE\": 32.8391739011002, \"R2\": 0.7443140702924032, \"Memory in Mb\": 0.0454044342041015, \"Time in s\": 3.859333 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 21.858333333333334, \"RMSE\": 33.61129662942374, \"R2\": 0.7404041745313898, \"Memory in Mb\": 0.0460147857666015, \"Time in s\": 4.022901 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 22.36307189542484, \"RMSE\": 34.18934989679706, \"R2\": 0.7431446126310046, \"Memory in Mb\": 0.0460681915283203, \"Time in s\": 4.189756 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"ChickWeights\", \"MAE\": 22.904341491841496, \"RMSE\": 34.79445522931405, 
\"R2\": 0.7539238777546076, \"Memory in Mb\": 0.046121597290039, \"Time in s\": 4.359905 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5579731333333355, \"RMSE\": 9.79490509533104, \"R2\": -224.0374880099697, \"Memory in Mb\": 0.0161190032958984, \"Time in s\": 0.00492 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.814472516666667, \"RMSE\": 6.975921914401759, \"R2\": -18.564491994995524, \"Memory in Mb\": 0.0286769866943359, \"Time in s\": 0.014556 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.386012811111112, \"RMSE\": 5.70893041508138, \"R2\": -16.665248891500116, \"Memory in Mb\": 0.0407314300537109, \"Time in s\": 0.029158 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1425351583333334, \"RMSE\": 4.950945892995169, \"R2\": -12.326934431680348, \"Memory in Mb\": 0.0527858734130859, \"Time in s\": 0.04994 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0609460066666667, \"RMSE\": 4.443635225860514, \"R2\": -5.735976224554387, \"Memory in Mb\": 0.065317153930664, \"Time in s\": 0.078368 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0173975888888886, \"RMSE\": 4.080152828774464, \"R2\": -3.8177766328983793, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.115087 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9499007047619044, \"RMSE\": 3.785073461941064, \"R2\": -3.284514427728187, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.160142 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8997031916666666, \"RMSE\": 3.548063392436267, \"R2\": -2.320037309218333, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.213621 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8894699925925924, \"RMSE\": 3.3651237174821174, \"R2\": -1.6229174672077478, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.275231 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8368248133333331, \"RMSE\": 3.1964619940401167, \"R2\": -1.3736195901717156, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.3451079999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7973693939393937, \"RMSE\": 3.051737973437425, \"R2\": -1.333837761501293, \"Memory in Mb\": 0.0653667449951171, \"Time in s\": 0.4231859999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7882918027777774, \"RMSE\": 2.9302063484469683, \"R2\": -1.1678441811700535, \"Memory in Mb\": 0.0658702850341796, \"Time in s\": 0.5093219999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.756480287179487, \"RMSE\": 2.818118800540444, \"R2\": -1.032185390259123, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 0.603278 }, { \"step\": 280, 
\"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7152656523809521, \"RMSE\": 2.716213897230066, \"R2\": -0.9765794643185606, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 0.705058 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6924420288888888, \"RMSE\": 2.626670145740793, \"R2\": -0.8178051192110904, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 0.8148869999999999 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6685162833333335, \"RMSE\": 2.544831183351663, \"R2\": -0.7907817018280727, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 0.932496 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6729953196078432, \"RMSE\": 2.478015515638401, \"R2\": -0.7943500832815562, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.057867 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6574798574074076, \"RMSE\": 2.410620514027796, \"R2\": -0.7120191674014065, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.1910109999999998 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6458375333333335, \"RMSE\": 2.3511270035956984, \"R2\": -0.6848943678054311, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.3319119999999998 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6474776666666668, \"RMSE\": 2.299895164719867, \"R2\": -0.6578320154352482, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.4805999999999997 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6263061492063494, \"RMSE\": 2.245732257498697, \"R2\": -0.6329783328857519, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.6370479999999996 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6161101106060607, \"RMSE\": 2.196509834675512, \"R2\": -0.5339119919932027, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 1.8010919999999997 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6114796710144929, \"RMSE\": 2.151899880839346, \"R2\": -0.3961217660875815, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 1.972734 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.595679659722222, \"RMSE\": 2.1075134371992843, \"R2\": -0.3083133320099125, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.151984 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5871453373333329, \"RMSE\": 2.067421382434369, \"R2\": -0.2325961934637197, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 2.338834 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5787837589743586, \"RMSE\": 2.029002666882104, \"R2\": -0.1892840306210153, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.533354 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 0.5648130308641971, \"RMSE\": 1.991660903442925, \"R2\": -0.1417123847500625, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 2.735472 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5728799083333329, \"RMSE\": 1.964965242398572, \"R2\": -0.1377909554693113, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 2.945174 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5729694735632179, \"RMSE\": 1.9355227317321977, \"R2\": -0.1312531571152491, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 3.162675 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5633090777777773, \"RMSE\": 1.904172510525728, \"R2\": -0.0827915396879483, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 3.387869 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5627685010752685, \"RMSE\": 1.877938378401011, \"R2\": -0.0371083516253996, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 3.620659 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5527265229166664, \"RMSE\": 1.8490211996046173, \"R2\": 0.0133784354988776, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 3.861011 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5494711494949491, \"RMSE\": 1.8235613734746328, \"R2\": 0.0472618750543785, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 4.108899 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5450294392156858, \"RMSE\": 1.7983422134533726, \"R2\": 0.05878940572589, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 4.364357 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5469558666666661, \"RMSE\": 1.7765896178823126, \"R2\": 0.0572950833274868, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 4.627459 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.543456514814814, \"RMSE\": 1.7539567687409483, \"R2\": 0.0609744119033208, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 4.898124 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5378193549549544, \"RMSE\": 1.7312282531924004, \"R2\": 0.085703628915721, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 5.1763650000000005 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5339037666666662, \"RMSE\": 1.709797998783685, \"R2\": 0.0985374850173486, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 5.462206 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5278528008547004, \"RMSE\": 1.688590710651063, \"R2\": 0.116822375782677, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 5.755616 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5283612208333329, \"RMSE\": 1.6702821330140922, \"R2\": 0.1280551636806639, 
\"Memory in Mb\": 0.065500259399414, \"Time in s\": 6.056664 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5218793715447149, \"RMSE\": 1.650734458460968, \"R2\": 0.1389919937119252, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 6.365303 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5164909984126979, \"RMSE\": 1.6320321449108954, \"R2\": 0.1505777025063954, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 6.681529 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5203083496124026, \"RMSE\": 1.617111920842916, \"R2\": 0.167474405637749, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 7.005502 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5169651212121207, \"RMSE\": 1.6004898524254525, \"R2\": 0.188553388101048, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 7.337010999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5126542296296291, \"RMSE\": 1.583708561642625, \"R2\": 0.2021320849395992, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 7.676102999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5104052702898545, \"RMSE\": 1.5679886127118026, \"R2\": 0.2050422348676139, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 8.022855999999999 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5051605113475173, \"RMSE\": 1.551953998854848, \"R2\": 0.2146112384044058, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 8.377156 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.5025567965277773, \"RMSE\": 1.5374441378201589, \"R2\": 0.2211695274632112, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 8.739002999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.4982526563945573, \"RMSE\": 1.522650494486571, \"R2\": 0.2212491734260424, \"Memory in Mb\": 0.065500259399414, \"Time in s\": 9.108430999999998 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"k-Nearest Neighbors\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.4939745494666663, \"RMSE\": 1.5080707141004983, \"R2\": 0.2232321406133778, \"Memory in Mb\": 0.0660037994384765, \"Time in s\": 9.485458999999995 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.042756132756132, \"RMSE\": 17.336048579080593, \"R2\": -385.8634917094176, \"Memory in Mb\": 0.0165748596191406, \"Time in s\": 0.003293 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.456785613727984, \"RMSE\": 12.282422261556867, \"R2\": -158.770726389092, \"Memory in Mb\": 0.0181541442871093, \"Time in s\": 0.009578 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4353973358733074, \"RMSE\": 10.07037651743448, \"R2\": -69.4325218162971, \"Memory in Mb\": 0.0234184265136718, \"Time in s\": 0.01815 }, { \"step\": 44, \"track\": \"Regression\", 
\"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.736909422894262, \"RMSE\": 8.732393473100391, \"R2\": -59.03623058514604, \"Memory in Mb\": 0.0244712829589843, \"Time in s\": 0.029162 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.788577579622257, \"RMSE\": 8.074088551816661, \"R2\": -11.726025456653014, \"Memory in Mb\": 0.0313148498535156, \"Time in s\": 0.042723 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.395880085598137, \"RMSE\": 7.878422021930021, \"R2\": -4.223121571879303, \"Memory in Mb\": 0.0407905578613281, \"Time in s\": 0.0593589999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.889526501621088, \"RMSE\": 7.800910386370324, \"R2\": -2.432180745921895, \"Memory in Mb\": 0.0471076965332031, \"Time in s\": 0.0796159999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.072650698433535, \"RMSE\": 7.572197783925699, \"R2\": -1.9320509270116557, \"Memory in Mb\": 0.0528984069824218, \"Time in s\": 0.103875 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.410984939713907, \"RMSE\": 7.55185413515251, \"R2\": -1.439151418709002, \"Memory in Mb\": 0.0539512634277343, \"Time in s\": 0.132276 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.441558524813062, \"RMSE\": 7.364764038532391, \"R2\": -0.6199522309877294, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.164898 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.487290951327676, \"RMSE\": 7.260940155844585, \"R2\": -0.2126939871368238, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.201538 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.401729970486312, \"RMSE\": 7.0591187066650845, \"R2\": 0.0632010249038049, \"Memory in Mb\": 0.0555305480957031, \"Time in s\": 0.242248 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.303599977233167, \"RMSE\": 6.863829202938119, \"R2\": 0.2826256992169514, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.2871789999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.247967976141752, \"RMSE\": 6.717580819449276, \"R2\": 0.4160344373982124, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.3361619999999999 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.525268599337025, \"RMSE\": 6.978492074792776, \"R2\": 0.493394283015475, \"Memory in Mb\": 0.0560569763183593, \"Time in s\": 0.3890969999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.7434869323510185, \"RMSE\": 7.161143757518859, \"R2\": 0.5698598432460567, \"Memory in Mb\": 0.0565834045410156, \"Time in s\": 0.4460369999999999 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.817684977356876, \"RMSE\": 7.1877471099050325, \"R2\": 0.6451941261958376, \"Memory in Mb\": 
0.0565834045410156, \"Time in s\": 0.5072089999999999 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.83667165494537, \"RMSE\": 7.176577259975889, \"R2\": 0.7186458480933114, \"Memory in Mb\": 0.0565834045410156, \"Time in s\": 0.5725709999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.073405719834179, \"RMSE\": 7.569308518085582, \"R2\": 0.7419802486075263, \"Memory in Mb\": 0.0217466354370117, \"Time in s\": 0.645323 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.671396913729996, \"RMSE\": 8.67042326781336, \"R2\": 0.7036008152378226, \"Memory in Mb\": 0.0280637741088867, \"Time in s\": 0.719465 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.870013976865108, \"RMSE\": 8.892004937785565, \"R2\": 0.7333469233470653, \"Memory in Mb\": 0.0333280563354492, \"Time in s\": 0.7950900000000001 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.098410134572789, \"RMSE\": 9.27860472778795, \"R2\": 0.7663748869623117, \"Memory in Mb\": 0.0380659103393554, \"Time in s\": 0.8723510000000001 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.1962339865774, \"RMSE\": 9.406595007903094, \"R2\": 0.7914570321252903, \"Memory in Mb\": 0.0417509078979492, \"Time in s\": 0.951359 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.851942913488504, \"RMSE\": 10.678395276366356, \"R2\": 0.7544562538164442, \"Memory in Mb\": 0.0418310165405273, \"Time in s\": 1.031981 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.351838545672251, \"RMSE\": 11.801369148896674, \"R2\": 0.7361015298851068, \"Memory in Mb\": 0.0418310165405273, \"Time in s\": 1.114342 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.621792166351879, \"RMSE\": 12.282711040561283, \"R2\": 0.7524071035845484, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.198422 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.637256630205925, \"RMSE\": 12.295347873811286, \"R2\": 0.7848229800793282, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.284253 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.1943326666584, \"RMSE\": 13.18128308543095, \"R2\": 0.7797471460356308, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.3717719999999998 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.301321372784988, \"RMSE\": 15.883856969554804, \"R2\": 0.7097662534619076, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.4609289999999997 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.759535973032726, \"RMSE\": 16.540475274696632, \"R2\": 0.7306639846914664, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.5518979999999998 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", 
\"MAE\": 9.98108531256273, \"RMSE\": 16.6656027575944, \"R2\": 0.7551596457293974, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.6445489999999998 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 10.172493780682656, \"RMSE\": 16.824682995393008, \"R2\": 0.773160093080841, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.7389499999999998 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.068151856114426, \"RMSE\": 18.263714825485387, \"R2\": 0.7404354867888504, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.835003 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.603116520774073, \"RMSE\": 19.443156920913136, \"R2\": 0.7295745843003554, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 1.932986 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.00507936887919, \"RMSE\": 20.0961554988217, \"R2\": 0.744401153020958, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 2.032719 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.159003512064782, \"RMSE\": 20.104597547074984, \"R2\": 0.7614813985176707, \"Memory in Mb\": 0.0423574447631835, \"Time in s\": 2.134109 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.058561054914293, \"RMSE\": 21.64678300128301, \"R2\": 0.7429728504217219, \"Memory in Mb\": 0.0393133163452148, \"Time in s\": 2.2395339999999995 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.849374886718222, \"RMSE\": 23.13707582414104, \"R2\": 0.7243608784812086, \"Memory in Mb\": 0.039839744567871, \"Time in s\": 2.346639 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.418764158229274, \"RMSE\": 24.09396728520024, \"R2\": 0.7343803671174814, \"Memory in Mb\": 0.0408926010131835, \"Time in s\": 2.455353 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.611969435637262, \"RMSE\": 24.1872515908579, \"R2\": 0.7512659254336986, \"Memory in Mb\": 0.0414190292358398, \"Time in s\": 2.5657939999999995 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 15.14954210400284, \"RMSE\": 24.823452151261105, \"R2\": 0.7491462990276416, \"Memory in Mb\": 0.0429983139038085, \"Time in s\": 2.6778409999999995 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 16.266748325298664, \"RMSE\": 26.99226997645693, \"R2\": 0.7214095667925529, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 2.791571 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.063164501315207, \"RMSE\": 28.33702908143248, \"R2\": 0.7288823142196059, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 2.9070249999999995 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.41028324926407, \"RMSE\": 28.63458736095403, \"R2\": 0.7383292654808864, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.0241479999999994 
}, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.881702409973563, \"RMSE\": 29.18189849457619, \"R2\": 0.7443486730566713, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.1430089999999997 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 18.783373559654446, \"RMSE\": 30.65392766804094, \"R2\": 0.7261055653266129, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.263499999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 19.52135237811833, \"RMSE\": 31.66784012367412, \"R2\": 0.7241496029986892, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.3857279999999994 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.32410387080197, \"RMSE\": 32.88418602247989, \"R2\": 0.7325694870351915, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.509655999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.50845806362208, \"RMSE\": 32.89095814819447, \"R2\": 0.7435070498169007, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.635277999999999 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 21.507291986413147, \"RMSE\": 34.52927015042095, \"R2\": 0.7260306568753008, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.762600999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 22.222091157093345, \"RMSE\": 35.46412346985515, \"R2\": 0.7236334646353288, \"Memory in Mb\": 0.0435247421264648, \"Time in s\": 3.891587999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 23.084218600655667, \"RMSE\": 36.66377836765904, \"R2\": 0.7267728639741885, \"Memory in Mb\": 0.044051170349121, \"Time in s\": 4.022359999999999 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.834704431652337, \"RMSE\": 13.708514217962266, \"R2\": -439.7934984576362, \"Memory in Mb\": 0.0508193969726562, \"Time in s\": 0.006949 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.4692310697037447, \"RMSE\": 9.813795721313518, \"R2\": -37.72035957928713, \"Memory in Mb\": 0.0739822387695312, \"Time in s\": 0.020525 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.530247618203559, \"RMSE\": 8.024836796214231, \"R2\": -33.90460110966681, \"Memory in Mb\": 0.0866165161132812, \"Time in s\": 0.041034 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1398752670733447, \"RMSE\": 6.982837000856316, \"R2\": -25.510487239912003, \"Memory in Mb\": 0.09661865234375, \"Time in s\": 0.06908 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2521629689485394, \"RMSE\": 6.362737158647257, \"R2\": -12.810573390910957, \"Memory in Mb\": 0.1060943603515625, \"Time in s\": 0.105159 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.275331183116589, 
\"RMSE\": 5.895687482983747, \"R2\": -9.059182991303912, \"Memory in Mb\": 0.1103057861328125, \"Time in s\": 0.149228 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.181766409647037, \"RMSE\": 5.493495699082884, \"R2\": -8.025069637302263, \"Memory in Mb\": 0.1124114990234375, \"Time in s\": 0.201546 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0641207293789914, \"RMSE\": 5.165105496730293, \"R2\": -6.03588310696345, \"Memory in Mb\": 0.1166229248046875, \"Time in s\": 0.262282 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9901037542149176, \"RMSE\": 4.906162642056599, \"R2\": -4.575276834209563, \"Memory in Mb\": 0.1187286376953125, \"Time in s\": 0.331582 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.854788525917255, \"RMSE\": 4.661016718308231, \"R2\": -4.0470005140641305, \"Memory in Mb\": 0.015085220336914, \"Time in s\": 0.422383 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.777996659033335, \"RMSE\": 4.4592908674997, \"R2\": -3.98319384245183, \"Memory in Mb\": 0.0312490463256835, \"Time in s\": 0.517121 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.696058551537428, \"RMSE\": 4.277699003809556, \"R2\": -3.620107687238352, \"Memory in Mb\": 0.0370397567749023, \"Time in s\": 0.616341 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6130985138696277, \"RMSE\": 4.114896288193841, \"R2\": -3.332738894340625, \"Memory in Mb\": 0.044569969177246, \"Time in s\": 0.720441 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5797289220452373, \"RMSE\": 3.98595914915012, \"R2\": -3.256494063385272, \"Memory in Mb\": 0.0575590133666992, \"Time in s\": 0.829564 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5530598651848762, \"RMSE\": 3.87273637004692, \"R2\": -2.951592153393709, \"Memory in Mb\": 0.0675611495971679, \"Time in s\": 0.944355 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.487708679701559, \"RMSE\": 3.753298690745279, \"R2\": -2.895390019001044, \"Memory in Mb\": 0.0754575729370117, \"Time in s\": 1.064913 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.447577018485449, \"RMSE\": 3.651821310152781, \"R2\": -2.8968902417113367, \"Memory in Mb\": 0.0807218551635742, \"Time in s\": 1.191641 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4679964351503354, \"RMSE\": 3.577735862687295, \"R2\": -2.771095017816483, \"Memory in Mb\": 0.0886182785034179, \"Time in s\": 1.324785 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.433746711521204, \"RMSE\": 3.490600620499155, \"R2\": -2.7138197592485107, \"Memory in Mb\": 0.0938825607299804, \"Time in s\": 1.4646529999999998 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.394545215153479, \"RMSE\": 3.4083536810761967, \"R2\": -2.640941921411436, \"Memory in Mb\": 0.1012525558471679, \"Time in s\": 1.611633 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3582303513295786, \"RMSE\": 3.3304244373469776, \"R2\": -2.5913988321456767, \"Memory in Mb\": 0.1054639816284179, \"Time in s\": 1.7657539999999998 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3237600646089562, \"RMSE\": 3.2576165394889824, \"R2\": -2.3739144098898306, \"Memory in Mb\": 0.1112546920776367, \"Time in s\": 1.927203 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.313374021763953, \"RMSE\": 3.19664903268704, \"R2\": -2.080839603370824, \"Memory in Mb\": 0.1196775436401367, \"Time in s\": 2.096272 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2871329178228548, \"RMSE\": 3.1331240379472574, \"R2\": -1.891520259695389, \"Memory in Mb\": 0.1275739669799804, \"Time in s\": 2.277254 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.257405891185914, \"RMSE\": 3.073015209583509, \"R2\": -1.7232796098685204, \"Memory in Mb\": 0.1323118209838867, \"Time in s\": 2.471873 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.236691492049667, \"RMSE\": 3.017934049641527, \"R2\": -1.6311150670478525, \"Memory in Mb\": 0.1375761032104492, \"Time in s\": 2.675336 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2120036461347103, \"RMSE\": 2.9640451937301795, \"R2\": -1.528689807762199, \"Memory in Mb\": 0.1396818161010742, \"Time in s\": 2.8858550000000003 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1973910792850353, \"RMSE\": 2.9159627023633448, \"R2\": -1.5056283897938432, \"Memory in Mb\": 0.1444196701049804, \"Time in s\": 3.1010690000000003 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1729976236433703, \"RMSE\": 2.867868059269123, \"R2\": -1.4835996265310456, \"Memory in Mb\": 0.1470518112182617, \"Time in s\": 3.321073 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.164383909889498, \"RMSE\": 2.8255860627341494, \"R2\": -1.384236602114556, \"Memory in Mb\": 0.1414899826049804, \"Time in s\": 3.5505340000000003 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1607847453289375, \"RMSE\": 2.7880287925830083, \"R2\": -1.2858933574415188, \"Memory in Mb\": 0.1441221237182617, \"Time in s\": 3.784549 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1488689392891114, \"RMSE\": 2.748475985188827, \"R2\": -1.179970998158682, \"Memory in Mb\": 0.1462278366088867, \"Time in s\": 4.023051000000001 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1353328006378431, \"RMSE\": 2.711500109160092, \"R2\": -1.1064542348150137, \"Memory in Mb\": 
0.0933332443237304, \"Time in s\": 4.270219000000001 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1237673891934872, \"RMSE\": 2.67586665558629, \"R2\": -1.083872122585975, \"Memory in Mb\": 0.1028089523315429, \"Time in s\": 4.520950000000001 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1197870211993892, \"RMSE\": 2.6450513004918434, \"R2\": -1.0896316212520398, \"Memory in Mb\": 0.1107053756713867, \"Time in s\": 4.77542 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0988128320741315, \"RMSE\": 2.6090976903580367, \"R2\": -1.077879402293885, \"Memory in Mb\": 0.1170225143432617, \"Time in s\": 5.033773 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0821696401958585, \"RMSE\": 2.5758102479785587, \"R2\": -1.0239793040320149, \"Memory in Mb\": 0.1207075119018554, \"Time in s\": 5.296116 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0646778366488154, \"RMSE\": 2.542912274197232, \"R2\": -0.9939800998628658, \"Memory in Mb\": 0.1238660812377929, \"Time in s\": 5.562507999999999 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0458773095514022, \"RMSE\": 2.5110610332394967, \"R2\": -0.953051985319172, \"Memory in Mb\": 0.1301832199096679, \"Time in s\": 5.833025999999999 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.037522387437363, \"RMSE\": 2.4829468635274075, \"R2\": -0.9268335078372076, \"Memory in Mb\": 0.1405134201049804, \"Time in s\": 6.107975999999999 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0344825476074344, \"RMSE\": 2.459713101244265, \"R2\": -0.9117084698305152, \"Memory in Mb\": 0.1468305587768554, \"Time in s\": 6.387556999999999 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0265033672562451, \"RMSE\": 2.433853565150633, \"R2\": -0.8891007921837151, \"Memory in Mb\": 0.1505155563354492, \"Time in s\": 6.671677999999999 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0098876748057108, \"RMSE\": 2.406249644034785, \"R2\": -0.8433117943327111, \"Memory in Mb\": 0.1520948410034179, \"Time in s\": 6.960477999999999 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9987418826957182, \"RMSE\": 2.3807044173500915, \"R2\": -0.795415858642577, \"Memory in Mb\": 0.1552534103393554, \"Time in s\": 7.253952999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9899512872354768, \"RMSE\": 2.356297962435036, \"R2\": -0.7662040947107138, \"Memory in Mb\": 0.1573591232299804, \"Time in s\": 7.552186999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9763510674237786, \"RMSE\": 2.3315103197053237, \"R2\": -0.7576521562914318, \"Memory in Mb\": 0.1254529953002929, \"Time in s\": 7.860606999999999 }, { \"step\": 940, \"track\": 
\"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9721072130212216, \"RMSE\": 2.310027291919755, \"R2\": -0.7400479483856388, \"Memory in Mb\": 0.1344251632690429, \"Time in s\": 8.173459999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9655343485152448, \"RMSE\": 2.289186508543074, \"R2\": -0.7266590967915565, \"Memory in Mb\": 0.1402158737182617, \"Time in s\": 8.490537999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9583856890611192, \"RMSE\": 2.2686834337047155, \"R2\": -0.7288044208040367, \"Memory in Mb\": 0.1444272994995117, \"Time in s\": 8.812014999999999 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Hoeffding Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9497447679766952, \"RMSE\": 2.248146643879841, \"R2\": -0.7262238291744263, \"Memory in Mb\": 0.1486387252807617, \"Time in s\": 9.137959 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 8.051220648038832, \"RMSE\": 17.336198122120386, \"R2\": -385.8701660091343, \"Memory in Mb\": 0.0232887268066406, \"Time in s\": 0.00403 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.498502947359929, \"RMSE\": 12.28528637536428, \"R2\": -158.84524831763767, \"Memory in Mb\": 0.0249290466308593, \"Time in s\": 0.011609 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4668695042339137, \"RMSE\": 10.074636808082968, \"R2\": -69.49212762837747, \"Memory in Mb\": 0.0301933288574218, \"Time in s\": 0.022078 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.7637805804889557, \"RMSE\": 8.735764655686483, \"R2\": -59.08259408516962, \"Memory in Mb\": 0.0313072204589843, \"Time in s\": 0.035307 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 2.814517498310432, \"RMSE\": 8.074396776941786, \"R2\": -11.726997097138026, \"Memory in Mb\": 0.0381507873535156, \"Time in s\": 0.051286 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.396900059747575, \"RMSE\": 7.862006773633152, \"R2\": -4.201378762014764, \"Memory in Mb\": 0.0476264953613281, \"Time in s\": 0.070551 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 3.8844336568547537, \"RMSE\": 7.782255505653143, \"R2\": -2.415785129732385, \"Memory in Mb\": 0.0540046691894531, \"Time in s\": 0.093692 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.068768385552718, \"RMSE\": 7.555909217267645, \"R2\": -1.9194502155140076, \"Memory in Mb\": 0.0597953796386718, \"Time in s\": 0.121065 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.311602452908636, \"RMSE\": 7.487314706483316, \"R2\": -1.3976387620786477, \"Memory in Mb\": 0.0608482360839843, \"Time in s\": 0.152902 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 
4.261918758035323, \"RMSE\": 7.240982145259267, \"R2\": -0.5659557565320237, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.189312 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.32509570871032, \"RMSE\": 7.149348278127394, \"R2\": -0.1757051422939808, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.229979 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.243770455182887, \"RMSE\": 6.949556168474376, \"R2\": 0.0920549285716371, \"Memory in Mb\": 0.0624275207519531, \"Time in s\": 0.275055 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.119311205048765, \"RMSE\": 6.740083059431663, \"R2\": 0.3082592266545521, \"Memory in Mb\": 0.0241641998291015, \"Time in s\": 0.330922 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.094718549433554, \"RMSE\": 6.618738421062464, \"R2\": 0.4330929316851147, \"Memory in Mb\": 0.034926414489746, \"Time in s\": 0.389383 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.353591485820727, \"RMSE\": 6.858418841195889, \"R2\": 0.5106778054828556, \"Memory in Mb\": 0.041365623474121, \"Time in s\": 0.450937 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.494676115333661, \"RMSE\": 6.99651956882687, \"R2\": 0.5894091082089881, \"Memory in Mb\": 0.0471563339233398, \"Time in s\": 0.515937 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.531460188122701, \"RMSE\": 6.982633238942946, \"R2\": 0.6651551017011323, \"Memory in Mb\": 0.052016258239746, \"Time in s\": 0.584675 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.550856564096301, \"RMSE\": 6.948954565412159, \"R2\": 0.736210476532317, \"Memory in Mb\": 0.0546483993530273, \"Time in s\": 0.6572239999999999 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 4.745146525796211, \"RMSE\": 7.286245359964537, \"R2\": 0.7609173164436883, \"Memory in Mb\": 0.0546483993530273, \"Time in s\": 0.733675 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.330595718754616, \"RMSE\": 8.515887777891804, \"R2\": 0.7140722769094752, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.814194 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.50563091305374, \"RMSE\": 8.701126762111178, \"R2\": 0.7446721431039482, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.898563 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.723610994733609, \"RMSE\": 9.068119167211083, \"R2\": 0.7768542529953268, \"Memory in Mb\": 0.0552968978881835, \"Time in s\": 0.986859 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 5.834317193911452, \"RMSE\": 9.203767847944404, \"R2\": 0.8003533769470821, \"Memory in Mb\": 
0.0552968978881835, \"Time in s\": 1.07894 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 6.561326226922799, \"RMSE\": 10.595386608942691, \"R2\": 0.7582588922727441, \"Memory in Mb\": 0.0553770065307617, \"Time in s\": 1.17475 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.055343281410319, \"RMSE\": 11.793355798881397, \"R2\": 0.7364597921881992, \"Memory in Mb\": 0.0553770065307617, \"Time in s\": 1.274735 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.331998951413002, \"RMSE\": 12.245186296589464, \"R2\": 0.7539176280650666, \"Memory in Mb\": 0.0539121627807617, \"Time in s\": 1.383231 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.416966166983629, \"RMSE\": 12.289761227218738, \"R2\": 0.7850184759487971, \"Memory in Mb\": 0.0544385910034179, \"Time in s\": 1.495361 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 7.99448083149163, \"RMSE\": 13.217085318753208, \"R2\": 0.7785490451915651, \"Memory in Mb\": 0.0545606613159179, \"Time in s\": 1.6112419999999998 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.157233410060112, \"RMSE\": 16.13339057164046, \"R2\": 0.7005755447430102, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.73087 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.47509121278654, \"RMSE\": 16.446724789755304, \"R2\": 0.7337084949314072, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.854306 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.757592549477597, \"RMSE\": 16.701217161288277, \"R2\": 0.7541120796137563, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 1.981549 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 9.93501095513177, \"RMSE\": 16.87017564150386, \"R2\": 0.7719317193583981, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 2.112498 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 10.85670423687554, \"RMSE\": 18.405576814105093, \"R2\": 0.7363875320655082, \"Memory in Mb\": 0.0561399459838867, \"Time in s\": 2.247206 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 11.59956477803306, \"RMSE\": 20.214372545093333, \"R2\": 0.7076961913412723, \"Memory in Mb\": 0.0658864974975586, \"Time in s\": 2.386792 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.011712885443345, \"RMSE\": 20.838356414394227, \"R2\": 0.7251727140820715, \"Memory in Mb\": 0.0713338851928711, \"Time in s\": 2.531165 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.02179706392092, \"RMSE\": 20.699504894468426, \"R2\": 0.7471567277697432, \"Memory in Mb\": 0.0781774520874023, \"Time in s\": 2.680539 }, { \"step\": 407, \"track\": \"Regression\", 
\"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 12.867904342374958, \"RMSE\": 22.04935022682606, \"R2\": 0.733324041761514, \"Memory in Mb\": 0.0825719833374023, \"Time in s\": 2.8351709999999994 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 13.7726214629037, \"RMSE\": 23.76360253855293, \"R2\": 0.7092307493255587, \"Memory in Mb\": 0.0846776962280273, \"Time in s\": 2.9951159999999994 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.320664029675132, \"RMSE\": 24.67720892632965, \"R2\": 0.7213650338700139, \"Memory in Mb\": 0.0656805038452148, \"Time in s\": 3.164622 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 14.560745017781516, \"RMSE\": 24.854467305977835, \"R2\": 0.7373537773889289, \"Memory in Mb\": 0.0706624984741211, \"Time in s\": 3.338776 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 15.05271815758178, \"RMSE\": 25.416929688531035, \"R2\": 0.7370081245929037, \"Memory in Mb\": 0.0787420272827148, \"Time in s\": 3.5178329999999995 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 16.182360709465044, \"RMSE\": 27.521690548068367, \"R2\": 0.7103739673524856, \"Memory in Mb\": 0.0857076644897461, \"Time in s\": 3.702085 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.028395162723214, \"RMSE\": 29.038217655150596, \"R2\": 0.7152989103492285, \"Memory in Mb\": 0.0888662338256836, \"Time in s\": 3.891670999999999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.39410374569335, \"RMSE\": 29.368989764604034, \"R2\": 0.7247347991767862, \"Memory in Mb\": 0.0889272689819336, \"Time in s\": 4.084988999999999 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 17.903720470221085, \"RMSE\": 29.931789274576047, \"R2\": 0.7310408495158558, \"Memory in Mb\": 0.0889272689819336, \"Time in s\": 4.281388999999999 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 18.828904303353827, \"RMSE\": 31.360943457558232, \"R2\": 0.7133254165496098, \"Memory in Mb\": 0.0895147323608398, \"Time in s\": 4.480870999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 19.65231385288099, \"RMSE\": 32.53535290181733, \"R2\": 0.7088292337107098, \"Memory in Mb\": 0.0743856430053711, \"Time in s\": 4.686204999999998 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.485248099981963, \"RMSE\": 33.800991401884744, \"R2\": 0.7174497851371298, \"Memory in Mb\": 0.0787191390991211, \"Time in s\": 4.894319999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 20.62693587606907, \"RMSE\": 33.76714158926765, \"R2\": 0.7296595823775684, \"Memory in Mb\": 0.0872030258178711, \"Time in s\": 5.105511999999998 }, { \"step\": 550, \"track\": \"Regression\", 
\"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 21.61100465238265, \"RMSE\": 35.363345192969206, \"R2\": 0.7126350131259431, \"Memory in Mb\": 0.0935201644897461, \"Time in s\": 5.319765999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 22.391562087266266, \"RMSE\": 36.4058649659661, \"R2\": 0.708760885936744, \"Memory in Mb\": 0.0934362411499023, \"Time in s\": 5.537231999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 23.25574522992599, \"RMSE\": 37.57896806973795, \"R2\": 0.7129622004044582, \"Memory in Mb\": 0.0946111679077148, \"Time in s\": 5.757816999999998 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.828377634536296, \"RMSE\": 13.70786256219322, \"R2\": -439.7515918302183, \"Memory in Mb\": 0.0575942993164062, \"Time in s\": 0.005178 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.453811275213839, \"RMSE\": 9.811073218407971, \"R2\": -37.69887927291551, \"Memory in Mb\": 0.0808181762695312, \"Time in s\": 0.01451 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5116544078850294, \"RMSE\": 8.021960641037959, \"R2\": -33.879585508404254, \"Memory in Mb\": 0.0934524536132812, \"Time in s\": 0.027873 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1224425015381523, \"RMSE\": 6.9797990571526345, \"R2\": -25.487425023640156, \"Memory in Mb\": 0.103515625, \"Time in s\": 0.045557 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.246653919301699, \"RMSE\": 6.363694444016854, \"R2\": -12.814729355257526, \"Memory in Mb\": 0.1129913330078125, \"Time in s\": 0.06794 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.270681160376927, \"RMSE\": 5.896666779393501, \"R2\": -9.06252500695684, \"Memory in Mb\": 0.1172027587890625, \"Time in s\": 0.095003 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1411489845018936, \"RMSE\": 5.486121567062232, \"R2\": -8.000856498367144, \"Memory in Mb\": 0.1193084716796875, \"Time in s\": 0.126859 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9595309296437795, \"RMSE\": 5.145701533389061, \"R2\": -5.983118424699933, \"Memory in Mb\": 0.048110008239746, \"Time in s\": 0.170894 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8606850760789413, \"RMSE\": 4.874784956472401, \"R2\": -4.504190782470528, \"Memory in Mb\": 0.0645513534545898, \"Time in s\": 0.218268 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.753768292507887, \"RMSE\": 4.635064394721464, \"R2\": -3.990954055000616, \"Memory in Mb\": 0.0751142501831054, \"Time in s\": 0.2693379999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": 
\"TrumpApproval\", \"MAE\": 1.6442100676088158, \"RMSE\": 4.426753978888705, \"R2\": -3.910740120483753, \"Memory in Mb\": 0.0809926986694336, \"Time in s\": 0.324221 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5586094458315778, \"RMSE\": 4.243291414416048, \"R2\": -3.546083102970604, \"Memory in Mb\": 0.0831594467163086, \"Time in s\": 0.383071 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4757349309283123, \"RMSE\": 4.079934399827006, \"R2\": -3.259426129696055, \"Memory in Mb\": 0.0869779586791992, \"Time in s\": 0.445946 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.429182741517093, \"RMSE\": 3.94384178403937, \"R2\": -3.1670173911200505, \"Memory in Mb\": 0.0965147018432617, \"Time in s\": 0.513038 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.395554812880875, \"RMSE\": 3.827170050036498, \"R2\": -2.859150937529973, \"Memory in Mb\": 0.1050596237182617, \"Time in s\": 0.584571 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.342072644309252, \"RMSE\": 3.7085248950294263, \"R2\": -2.8030066952692625, \"Memory in Mb\": 0.1113767623901367, \"Time in s\": 0.660678 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2981450416106572, \"RMSE\": 3.6023493777501, \"R2\": -2.7920215666096264, \"Memory in Mb\": 0.0969266891479492, \"Time in s\": 0.7468359999999999 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3017629599133824, \"RMSE\": 3.52722030011446, \"R2\": -2.665355451896766, \"Memory in Mb\": 0.1048231124877929, \"Time in s\": 0.837109 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2791887566109008, \"RMSE\": 3.4421979658375847, \"R2\": -2.6115379824259644, \"Memory in Mb\": 0.1101484298706054, \"Time in s\": 0.931726 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2309239845123985, \"RMSE\": 3.35653802988408, \"R2\": -2.531080237482485, \"Memory in Mb\": 0.1175184249877929, \"Time in s\": 1.030965 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.204488166009232, \"RMSE\": 3.279580566778272, \"R2\": -2.4825797995498564, \"Memory in Mb\": 0.1217298507690429, \"Time in s\": 1.13497 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1805943216757937, \"RMSE\": 3.209931042788128, \"R2\": -2.2758615914195226, \"Memory in Mb\": 0.1275205612182617, \"Time in s\": 1.243696 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1747748413206256, \"RMSE\": 3.150463961675437, \"R2\": -1.9924589881120156, \"Memory in Mb\": 0.1354780197143554, \"Time in s\": 1.3572460000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1643181646721343, \"RMSE\": 
3.0907225956471227, \"R2\": -1.8137863387673288, \"Memory in Mb\": 0.1433744430541992, \"Time in s\": 1.47581 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1467427258000205, \"RMSE\": 3.033773569773335, \"R2\": -1.6541724792729693, \"Memory in Mb\": 0.1362333297729492, \"Time in s\": 1.6042770000000002 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1310531754619513, \"RMSE\": 2.979836071514613, \"R2\": -1.5651047078316291, \"Memory in Mb\": 0.1415586471557617, \"Time in s\": 1.7377410000000002 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1142715036195536, \"RMSE\": 2.927551584361061, \"R2\": -1.466806182052764, \"Memory in Mb\": 0.1436643600463867, \"Time in s\": 1.876231 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1052485405576076, \"RMSE\": 2.88137538641146, \"R2\": -1.4465405352520455, \"Memory in Mb\": 0.1484022140502929, \"Time in s\": 2.0198690000000004 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0859142446379713, \"RMSE\": 2.834429565013283, \"R2\": -1.4260211929714153, \"Memory in Mb\": 0.1510343551635742, \"Time in s\": 2.168629 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0769717813954385, \"RMSE\": 2.7922355683541964, \"R2\": -1.3282862925750138, \"Memory in Mb\": 0.1547193527221679, \"Time in s\": 2.322692 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0766885404316004, \"RMSE\": 2.756385805344716, \"R2\": -1.234299900153052, \"Memory in Mb\": 0.1578779220581054, \"Time in s\": 2.481953 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.066276571170311, \"RMSE\": 2.7176435544384367, \"R2\": -1.1313354614936588, \"Memory in Mb\": 0.1599836349487304, \"Time in s\": 2.646434 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0564634149761172, \"RMSE\": 2.6824145051502803, \"R2\": -1.061505763110988, \"Memory in Mb\": 0.1636686325073242, \"Time in s\": 2.816279 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0488950225738152, \"RMSE\": 2.6482621618516, \"R2\": -1.0410990478472515, \"Memory in Mb\": 0.1663007736206054, \"Time in s\": 2.991546 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0494335446356584, \"RMSE\": 2.619695292588707, \"R2\": -1.0497603683353351, \"Memory in Mb\": 0.1547193527221679, \"Time in s\": 3.177748 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0311572549278785, \"RMSE\": 2.584165722213007, \"R2\": -1.038357615399126, \"Memory in Mb\": 0.1594572067260742, \"Time in s\": 3.369155 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0166711191684197, \"RMSE\": 2.551525288520744, \"R2\": 
-0.9859947129346353, \"Memory in Mb\": 0.1632032394409179, \"Time in s\": 3.565871 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.998612246058934, \"RMSE\": 2.518502326974768, \"R2\": -0.9558825700512557, \"Memory in Mb\": 0.1647825241088867, \"Time in s\": 3.768021 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9842422617410423, \"RMSE\": 2.4876653972852822, \"R2\": -0.916828228451222, \"Memory in Mb\": 0.1695814132690429, \"Time in s\": 3.975687 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9822601758268574, \"RMSE\": 2.462050972175237, \"R2\": -0.8945384291924348, \"Memory in Mb\": 0.1616849899291992, \"Time in s\": 4.194112 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9867607749501652, \"RMSE\": 2.442599600947298, \"R2\": -0.8851995145588711, \"Memory in Mb\": 0.1643171310424804, \"Time in s\": 4.4181 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9793985197413392, \"RMSE\": 2.417271328475225, \"R2\": -0.8634469862274994, \"Memory in Mb\": 0.1680021286010742, \"Time in s\": 4.647504 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9647458553132984, \"RMSE\": 2.3903176435066817, \"R2\": -0.8189831286734526, \"Memory in Mb\": 0.1701078414916992, \"Time in s\": 4.882465 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9550000119490066, \"RMSE\": 2.3652388018081414, \"R2\": -0.7721647401587992, \"Memory in Mb\": 0.1727399826049804, \"Time in s\": 5.123038 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.945028697359752, \"RMSE\": 2.34076619049233, \"R2\": -0.7429966155689833, \"Memory in Mb\": 0.1115369796752929, \"Time in s\": 5.374164 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9340382087332012, \"RMSE\": 2.316653968016665, \"R2\": -0.7353240501222194, \"Memory in Mb\": 0.1163969039916992, \"Time in s\": 5.629799 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.93159447743873, \"RMSE\": 2.295899595899596, \"R2\": -0.7188294143518761, \"Memory in Mb\": 0.1223096847534179, \"Time in s\": 5.8899680000000005 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9266882702688716, \"RMSE\": 2.2758824318695603, \"R2\": -0.7066477496763941, \"Memory in Mb\": 0.1296796798706054, \"Time in s\": 6.154803 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9199947893356224, \"RMSE\": 2.255690063945526, \"R2\": -0.7090584581073034, \"Memory in Mb\": 0.1349439620971679, \"Time in s\": 6.424549 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Hoeffding Adaptive Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.910675347240908, \"RMSE\": 2.2342958873570486, \"R2\": -0.7050189374098543, \"Memory in Mb\": 0.1382246017456054, 
\"Time in s\": 6.699174 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0096149444580078, \"Time in s\": 0.001833 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0126094818115234, \"Time in s\": 0.00539 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.015787124633789, \"Time in s\": 0.009921 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0188732147216796, \"Time in s\": 0.015497 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0218257904052734, \"Time in s\": 0.022338 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.0246181488037109, \"Time in s\": 0.030239 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 43.50649350649351, \"RMSE\": 43.70978671356627, \"R2\": -106.75487995129542, \"Memory in Mb\": 0.0275020599365234, \"Time in s\": 0.039152 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 44.21590909090909, \"RMSE\": 44.43649707984724, \"R2\": -99.97346126163, \"Memory in Mb\": 0.0300197601318359, \"Time in s\": 0.049123 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 45.05050505050505, \"RMSE\": 45.309262771858165, \"R2\": -86.8022342468144, \"Memory in Mb\": 0.0329036712646484, \"Time in s\": 0.060256 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16363636363636, \"RMSE\": 46.52487115902242, \"R2\": -63.64797006437341, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.074892 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 47.21487603305785, \"RMSE\": 47.67304278378361, \"R2\": -51.27707184490422, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.095588 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 48.29545454545455, \"RMSE\": 48.843054157105485, \"R2\": -43.84882422437649, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.122121 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 49.44055944055945, \"RMSE\": 50.100318941519305, \"R2\": -37.220279564063546, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.154488 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Stochastic 
Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 50.532467532467535, \"RMSE\": 51.29137544271156, \"R2\": -33.04474826644667, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.192654 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.690909090909095, \"RMSE\": 52.61253451297311, \"R2\": -27.795548438273773, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.236647 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.00568181818182, \"RMSE\": 54.11860921749895, \"R2\": -23.566226925646237, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.286373 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 54.41176470588235, \"RMSE\": 55.733754017636336, \"R2\": -20.33250305682894, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.341821 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.02525252525252, \"RMSE\": 57.635786091488654, \"R2\": -17.146924852486976, \"Memory in Mb\": 0.2696781158447265, \"Time in s\": 0.403009 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 55.16354936929098, \"RMSE\": 57.0482200725598, \"R2\": -13.656313160472004, \"Memory in Mb\": 0.6838865280151367, \"Time in s\": 0.4890500000000001 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.62203856749311, \"RMSE\": 56.03531795068661, \"R2\": -11.37998411824978, \"Memory in Mb\": 0.6869077682495117, \"Time in s\": 0.5845720000000001 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.77279286370195, \"RMSE\": 55.29408706815337, \"R2\": -9.311090357596036, \"Memory in Mb\": 0.6899290084838867, \"Time in s\": 0.6897040000000001 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.49661908339594, \"RMSE\": 55.0071045368674, \"R2\": -7.210918602421254, \"Memory in Mb\": 0.6929502487182617, \"Time in s\": 0.8041680000000001 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.25631812193077, \"RMSE\": 54.71344660515688, \"R2\": -6.055353919833875, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 0.92814 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.62511478420569, \"RMSE\": 54.312843786153664, \"R2\": -5.352168023774992, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.061635 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.4425344352617, \"RMSE\": 54.29364548356293, \"R2\": -4.585603291722447, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.204612 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 51.75651621106165, \"RMSE\": 54.635705044608144, \"R2\": -3.8989478253777694, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.357118 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", 
\"MAE\": 52.373839404142416, \"RMSE\": 55.25476711535166, \"R2\": -3.3456400671942, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.518976 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.87239275875638, \"RMSE\": 55.86677247417265, \"R2\": -2.9565197175813718, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.690095 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 52.69554478958866, \"RMSE\": 56.2770501442128, \"R2\": -2.6433309475704183, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 1.870449 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 53.85316804407712, \"RMSE\": 57.75044402630399, \"R2\": -2.2832890424968197, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.06005 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 54.90678041411178, \"RMSE\": 59.01114057562677, \"R2\": -2.0697921090482247, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.258925 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.00533746556472, \"RMSE\": 60.30224520856101, \"R2\": -1.9140207825503284, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.467073 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 55.99599298772852, \"RMSE\": 60.54917173074773, \"R2\": -1.852879941931207, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.684461 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 56.87222492302705, \"RMSE\": 61.81275171085535, \"R2\": -1.7331917323651345, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 2.911073 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 58.41786698150333, \"RMSE\": 63.95254893573906, \"R2\": -1.588502821427925, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 3.146928 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 59.7033976124885, \"RMSE\": 65.46926983257002, \"R2\": -1.5293357430909813, \"Memory in Mb\": 0.6947126388549805, \"Time in s\": 3.392032 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.057805647389294, \"RMSE\": 66.17359973042984, \"R2\": -1.4019380007417157, \"Memory in Mb\": 1.1097631454467771, \"Time in s\": 3.672033 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 59.7070864579051, \"RMSE\": 66.11592086962122, \"R2\": -1.2507954049688483, \"Memory in Mb\": 1.1127843856811523, \"Time in s\": 3.965768 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.122823673891816, \"RMSE\": 66.73609937588846, \"R2\": -1.0378169857688957, \"Memory in Mb\": 1.1158056259155271, \"Time in s\": 4.272702 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.39504675635191, \"RMSE\": 66.96100690444877, \"R2\": -0.906365593827489, 
\"Memory in Mb\": 1.1188268661499023, \"Time in s\": 4.593074 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.27126048587789, \"RMSE\": 66.93502892662679, \"R2\": -0.8239085862185902, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 4.926735 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 60.340686610373176, \"RMSE\": 67.43825007380137, \"R2\": -0.7390015352251049, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 5.273856 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 61.40703262301831, \"RMSE\": 69.11306667757516, \"R2\": -0.6127592621572406, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 5.634397 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 61.95796621360106, \"RMSE\": 69.71422620021941, \"R2\": -0.5510154280248158, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.008344 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 62.59018166487368, \"RMSE\": 70.55352405729404, \"R2\": -0.4943708535906215, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.395653 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 62.49664579133251, \"RMSE\": 70.88193125644693, \"R2\": -0.4644752452013045, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 6.796304 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 63.25224079915844, \"RMSE\": 71.92080214464903, \"R2\": -0.4228062717918979, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 7.21023 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 64.80783657170488, \"RMSE\": 74.3681944005728, \"R2\": -0.367764222300833, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 7.637592 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 65.59959781369417, \"RMSE\": 75.30113885843834, \"R2\": -0.3443906138479853, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 8.078512 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 65.79684627343133, \"RMSE\": 76.01328745307667, \"R2\": -0.3277190973108916, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 8.532886 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 66.6512855136148, \"RMSE\": 77.20436469287773, \"R2\": -0.3097569166669509, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 9.000807 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"ChickWeights\", \"MAE\": 68.11975592628174, \"RMSE\": 79.56492566870935, \"R2\": -0.2867456678376987, \"Memory in Mb\": 1.120589256286621, \"Time in s\": 9.482145 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 43.8732195, \"RMSE\": 43.87807788634269, \"R2\": -4514.954899312423, \"Memory in Mb\": 0.0199413299560546, \"Time in s\": 0.002755 }, { \"step\": 40, \"track\": 
\"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.4932955, \"RMSE\": 42.52255283421693, \"R2\": -725.9491167623446, \"Memory in Mb\": 0.0317363739013671, \"Time in s\": 0.008058 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.2167785, \"RMSE\": 42.2386240157387, \"R2\": -966.0073736019044, \"Memory in Mb\": 0.0438976287841796, \"Time in s\": 0.015549 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.975705625, \"RMSE\": 41.99760868559829, \"R2\": -957.9655948743646, \"Memory in Mb\": 0.0562419891357421, \"Time in s\": 0.025294 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.37550450000001, \"RMSE\": 41.410913785433536, \"R2\": -583.9966399141301, \"Memory in Mb\": 0.5381031036376953, \"Time in s\": 0.041246 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.936110000000006, \"RMSE\": 40.97829382197767, \"R2\": -484.9611418859003, \"Memory in Mb\": 0.5386066436767578, \"Time in s\": 0.070023 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.6885472857143, \"RMSE\": 40.72961738075088, \"R2\": -495.1050461477588, \"Memory in Mb\": 0.5391101837158203, \"Time in s\": 0.110787 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.35105437500001, \"RMSE\": 40.39801158334292, \"R2\": -429.4078677932073, \"Memory in Mb\": 0.5393619537353516, \"Time in s\": 0.163463 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00981655555555, \"RMSE\": 40.06373388340122, \"R2\": -370.7794659133543, \"Memory in Mb\": 0.5396137237548828, \"Time in s\": 0.227995 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.80633095, \"RMSE\": 39.860362966711, \"R2\": -368.1089073295326, \"Memory in Mb\": 0.5077581405639648, \"Time in s\": 0.320041 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 36.497516001377406, \"RMSE\": 38.01945344470104, \"R2\": -361.2329206514933, \"Memory in Mb\": 1.3602590560913086, \"Time in s\": 0.441408 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 33.64243104419191, \"RMSE\": 36.40668421494773, \"R2\": -333.65237138497804, \"Memory in Mb\": 1.360762596130371, \"Time in s\": 0.581306 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 31.222114965034955, \"RMSE\": 34.98371838354962, \"R2\": -312.16748668977897, \"Memory in Mb\": 1.3610143661499023, \"Time in s\": 0.739627 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 29.18205946861472, \"RMSE\": 33.71869814960704, \"R2\": -303.5986275675674, \"Memory in Mb\": 1.361769676208496, \"Time in s\": 0.915776 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 
27.34275770505051, \"RMSE\": 32.57805191350732, \"R2\": -278.63174197976707, \"Memory in Mb\": 1.3620214462280271, \"Time in s\": 1.109879 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 25.81388747443183, \"RMSE\": 31.5521424826706, \"R2\": -274.2849072221064, \"Memory in Mb\": 1.3630285263061523, \"Time in s\": 1.321838 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 24.51835124153299, \"RMSE\": 30.62414457186519, \"R2\": -273.0482727941538, \"Memory in Mb\": 1.3640356063842771, \"Time in s\": 1.551694 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 23.451930423400693, \"RMSE\": 29.78792492645533, \"R2\": -260.4155562259403, \"Memory in Mb\": 1.3660497665405271, \"Time in s\": 1.799643 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 22.46844053349284, \"RMSE\": 29.014219480552867, \"R2\": -255.5915105297988, \"Memory in Mb\": 1.3665533065795898, \"Time in s\": 2.065559 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 21.59490700757577, \"RMSE\": 28.301677882839343, \"R2\": -250.0434007116766, \"Memory in Mb\": 0.510127067565918, \"Time in s\": 2.355987 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 20.62268781294523, \"RMSE\": 27.62086591367872, \"R2\": -246.0239415518119, \"Memory in Mb\": 1.3623762130737305, \"Time in s\": 2.674434 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.786863931462925, \"RMSE\": 26.990398924900397, \"R2\": -230.60756767519212, \"Memory in Mb\": 1.3643903732299805, \"Time in s\": 3.010878 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 19.05732899619648, \"RMSE\": 26.404670160589287, \"R2\": -209.2038511633616, \"Memory in Mb\": 1.3666563034057615, \"Time in s\": 3.365293 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 18.376512097202227, \"RMSE\": 25.854792215140314, \"R2\": -195.90337768575387, \"Memory in Mb\": 1.3701810836791992, \"Time in s\": 3.737716 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 17.755044410127518, \"RMSE\": 25.338820973360427, \"R2\": -184.1550753065148, \"Memory in Mb\": 1.3716917037963867, \"Time in s\": 4.128280999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 17.16611419898163, \"RMSE\": 24.851444862058347, \"R2\": -177.4118263333629, \"Memory in Mb\": 1.3737058639526367, \"Time in s\": 4.537221 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 16.628565596068775, \"RMSE\": 24.392285078947275, \"R2\": -170.25012213753183, \"Memory in Mb\": 1.3747129440307615, \"Time in s\": 4.964375 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 16.091244232649693, \"RMSE\": 23.955027361350904, \"R2\": 
-168.10096043791202, \"Memory in Mb\": 1.3752164840698242, \"Time in s\": 5.410107999999999 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 15.590768135673304, \"RMSE\": 23.54051091957351, \"R2\": -166.33817208986073, \"Memory in Mb\": 1.3764753341674805, \"Time in s\": 5.874175999999999 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 15.168708628495342, \"RMSE\": 23.15108754841241, \"R2\": -159.05714501634571, \"Memory in Mb\": 0.5124959945678711, \"Time in s\": 6.365194 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.742446374247312, \"RMSE\": 22.77953961802373, \"R2\": -151.59887848495535, \"Memory in Mb\": 3.064208030700684, \"Time in s\": 6.921285 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.319364852585176, \"RMSE\": 22.42187566882095, \"R2\": -144.08105420081068, \"Memory in Mb\": 3.0679845809936523, \"Time in s\": 7.51197 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.916412195872256, \"RMSE\": 22.080274918425697, \"R2\": -138.68241285181185, \"Memory in Mb\": 3.0712575912475586, \"Time in s\": 8.136981 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.515604789075644, \"RMSE\": 21.753254558457893, \"R2\": -136.71797028279042, \"Memory in Mb\": 3.074782371520996, \"Time in s\": 8.796442 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 13.16391092204058, \"RMSE\": 21.44141764506316, \"R2\": -136.3120101768532, \"Memory in Mb\": 3.0773000717163086, \"Time in s\": 9.490636 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.828283113852926, \"RMSE\": 21.142484202016185, \"R2\": -135.44313416922282, \"Memory in Mb\": 3.078558921813965, \"Time in s\": 10.219341 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.50446646701278, \"RMSE\": 20.855361315179096, \"R2\": -131.6825380828392, \"Memory in Mb\": 3.0800695419311523, \"Time in s\": 10.982324 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 12.187542748969031, \"RMSE\": 20.57929219886472, \"R2\": -129.592708960364, \"Memory in Mb\": 3.0813283920288086, \"Time in s\": 11.779656 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.899403743710543, \"RMSE\": 20.31464229706916, \"R2\": -126.82553676745258, \"Memory in Mb\": 3.08359432220459, \"Time in s\": 12.611571 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.634366305883283, \"RMSE\": 20.06137952581079, \"R2\": -124.7856004590591, \"Memory in Mb\": 3.084601402282715, \"Time in s\": 13.493909000000002 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.363415331478278, \"RMSE\": 19.815492221289517, \"R2\": -123.0687724200615, \"Memory in 
Mb\": 3.08560848236084, \"Time in s\": 14.412321000000002 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.106640469158773, \"RMSE\": 19.57848368678801, \"R2\": -121.2430978899656, \"Memory in Mb\": 3.086615562438965, \"Time in s\": 15.363464000000002 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.873909665943762, \"RMSE\": 19.35022618912736, \"R2\": -118.20364312373844, \"Memory in Mb\": 3.087119102478028, \"Time in s\": 16.347229000000002 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.65545006969969, \"RMSE\": 19.130035299019603, \"R2\": -114.92727947355436, \"Memory in Mb\": 3.0873708724975586, \"Time in s\": 17.36361 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.439309697188907, \"RMSE\": 18.916827199314994, \"R2\": -112.83532852765144, \"Memory in Mb\": 3.08762264251709, \"Time in s\": 18.412326 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.21789524284777, \"RMSE\": 18.710158789526105, \"R2\": -112.19133803320568, \"Memory in Mb\": 3.087874412536621, \"Time in s\": 19.493438 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 10.012578535125469, \"RMSE\": 18.510293787577226, \"R2\": -110.72583714230213, \"Memory in Mb\": 3.077906608581543, \"Time in s\": 20.736809 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.811853150109153, \"RMSE\": 18.316579311485903, \"R2\": -109.54344305213982, \"Memory in Mb\": 3.0804243087768555, \"Time in s\": 22.013587 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.61909067795052, \"RMSE\": 18.12881604876013, \"R2\": -109.39183420714345, \"Memory in Mb\": 3.080927848815918, \"Time in s\": 23.322572 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Stochastic Gradient Tree\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.438738635632271, \"RMSE\": 17.946847607318464, \"R2\": -109.00797869183796, \"Memory in Mb\": 3.082438468933105, \"Time in s\": 24.663779 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.837563210503649, \"RMSE\": 16.830121687224917, \"R2\": -363.61289911513376, \"Memory in Mb\": 0.1506233215332031, \"Time in s\": 0.018991 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.3557641651310055, \"RMSE\": 11.925612892987612, \"R2\": -149.62275175212707, \"Memory in Mb\": 0.1761512756347656, \"Time in s\": 0.051966 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.371112580593177, \"RMSE\": 9.780386843070694, \"R2\": -65.43453306461763, \"Memory in Mb\": 0.2142868041992187, \"Time in s\": 0.1028409999999999 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.695097509519297, \"RMSE\": 8.482165721989492, \"R2\": -55.64483692929184, \"Memory in Mb\": 0.2312774658203125, \"Time in s\": 
0.1714339999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.750371500828058, \"RMSE\": 7.825470627419848, \"R2\": -10.954370249441634, \"Memory in Mb\": 0.2869682312011719, \"Time in s\": 0.2559499999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.874973614360605, \"RMSE\": 7.312672972792191, \"R2\": -3.4999113348114523, \"Memory in Mb\": 0.3332901000976562, \"Time in s\": 0.3603679999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.049190601733834, \"RMSE\": 7.064366487423796, \"R2\": -1.814660484448317, \"Memory in Mb\": 0.3119354248046875, \"Time in s\": 0.4921819999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 2.9760015514160614, \"RMSE\": 6.690266116634344, \"R2\": -1.2888345310585096, \"Memory in Mb\": 0.3463325500488281, \"Time in s\": 0.6436869999999999 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4458208345296213, \"RMSE\": 6.801756467406213, \"R2\": -0.9786716534372656, \"Memory in Mb\": 0.3914375305175781, \"Time in s\": 0.8148389999999999 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 3.8036130411377513, \"RMSE\": 6.901055458118455, \"R2\": -0.4223797783359673, \"Memory in Mb\": 0.4219093322753906, \"Time in s\": 1.008913 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.044286594485068, \"RMSE\": 6.961321070951229, \"R2\": -0.1146764842737158, \"Memory in Mb\": 0.4422340393066406, \"Time in s\": 1.227422 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.278051046290026, \"RMSE\": 6.992189163715883, \"R2\": 0.0808809347253165, \"Memory in Mb\": 0.4695549011230469, \"Time in s\": 1.467051 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.514698307868387, \"RMSE\": 7.09845673012605, \"R2\": 0.232743181062298, \"Memory in Mb\": 0.5039863586425781, \"Time in s\": 1.72858 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 4.711682816733759, \"RMSE\": 7.225211881769595, \"R2\": 0.3244420464758215, \"Memory in Mb\": 0.5465545654296875, \"Time in s\": 2.00923 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 5.098965674365188, \"RMSE\": 7.74448667501397, \"R2\": 0.3760752970367085, \"Memory in Mb\": 0.5543212890625, \"Time in s\": 2.317031 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 5.613573109580669, \"RMSE\": 8.405070008975855, \"R2\": 0.4074460804745814, \"Memory in Mb\": 0.5778732299804688, \"Time in s\": 2.650775 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.0300184245211925, \"RMSE\": 8.816070592615688, \"R2\": 0.4662285490406689, \"Memory in Mb\": 0.597381591796875, \"Time in s\": 3.008386 }, { \"step\": 198, \"track\": \"Regression\", \"model\": 
\"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.135166503700917, \"RMSE\": 8.873071763991604, \"R2\": 0.5699028278126741, \"Memory in Mb\": 0.6156463623046875, \"Time in s\": 3.389767 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 6.663870316688535, \"RMSE\": 9.680163486851024, \"R2\": 0.5780063435312484, \"Memory in Mb\": 0.6372909545898438, \"Time in s\": 3.797698 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.114976257039628, \"RMSE\": 10.70304519196575, \"R2\": 0.548340525531499, \"Memory in Mb\": 0.6516532897949219, \"Time in s\": 4.233731 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.558935022860611, \"RMSE\": 11.205896813348822, \"R2\": 0.5765126454701623, \"Memory in Mb\": 0.6456108093261719, \"Time in s\": 4.696369 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 7.8784966144348445, \"RMSE\": 11.546829616877767, \"R2\": 0.6381907286214681, \"Memory in Mb\": 0.6543197631835938, \"Time in s\": 5.184542 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 8.045465222989945, \"RMSE\": 11.7730800696534, \"R2\": 0.6733287963504477, \"Memory in Mb\": 0.6542396545410156, \"Time in s\": 5.698149999999999 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 8.55861765945315, \"RMSE\": 12.698578392308455, \"R2\": 0.6527621165047097, \"Memory in Mb\": 0.7007179260253906, \"Time in s\": 6.239901999999999 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.012932654808608, \"RMSE\": 13.883768164483952, \"R2\": 0.6347528903285174, \"Memory in Mb\": 0.7182159423828125, \"Time in s\": 6.810226999999999 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.46045520422096, \"RMSE\": 14.421722772543973, \"R2\": 0.6586625054492083, \"Memory in Mb\": 0.7397651672363281, \"Time in s\": 7.406784999999999 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.467084537445258, \"RMSE\": 14.393153360282469, \"R2\": 0.7051330126585751, \"Memory in Mb\": 0.6962127685546875, \"Time in s\": 8.032812 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 9.993822911601686, \"RMSE\": 15.306673373378164, \"R2\": 0.7029922374440833, \"Memory in Mb\": 0.712249755859375, \"Time in s\": 8.684709 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 10.906399564516825, \"RMSE\": 17.709412566907307, \"R2\": 0.6392184800849823, \"Memory in Mb\": 0.7230567932128906, \"Time in s\": 9.367603 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 11.354718196032138, \"RMSE\": 18.25739128586918, \"R2\": 0.6718473555535345, \"Memory in Mb\": 0.748199462890625, \"Time in s\": 10.07931 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 
11.709200775641314, \"RMSE\": 18.709755653596343, \"R2\": 0.691413317744465, \"Memory in Mb\": 0.7508468627929688, \"Time in s\": 10.818764 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 12.007404346564588, \"RMSE\": 19.02446579939404, \"R2\": 0.7099648583835785, \"Memory in Mb\": 0.7794418334960938, \"Time in s\": 11.583692 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 12.612646669962947, \"RMSE\": 20.078004824306745, \"R2\": 0.6863045708062432, \"Memory in Mb\": 0.8219375610351562, \"Time in s\": 12.374611 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 13.305874945245664, \"RMSE\": 21.69198202373745, \"R2\": 0.6634013148524545, \"Memory in Mb\": 0.8368568420410156, \"Time in s\": 13.19172 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 13.874152914620147, \"RMSE\": 22.398641632517744, \"R2\": 0.6824761966895349, \"Memory in Mb\": 0.8399162292480469, \"Time in s\": 14.039444 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 14.02164789427361, \"RMSE\": 22.45496751107478, \"R2\": 0.7024524705388633, \"Memory in Mb\": 0.8451423645019531, \"Time in s\": 14.916422 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 14.881080524519056, \"RMSE\": 23.990711305579165, \"R2\": 0.684297135752792, \"Memory in Mb\": 0.844085693359375, \"Time in s\": 15.821424 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 15.672731015633325, \"RMSE\": 25.84901428202966, \"R2\": 0.6559576619146927, \"Memory in Mb\": 0.8621559143066406, \"Time in s\": 16.75642 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 16.52730145447373, \"RMSE\": 27.03527355015745, \"R2\": 0.6655701177301542, \"Memory in Mb\": 0.8826904296875, \"Time in s\": 17.718062999999997 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 16.835543678126854, \"RMSE\": 27.29532071757395, \"R2\": 0.6832339396752392, \"Memory in Mb\": 0.8854255676269531, \"Time in s\": 18.709096 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 17.2847401584601, \"RMSE\": 27.83347178305896, \"R2\": 0.6846223453976918, \"Memory in Mb\": 0.9156723022460938, \"Time in s\": 19.725565 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 18.263556988874104, \"RMSE\": 29.983264015814683, \"R2\": 0.6562480279042127, \"Memory in Mb\": 0.9476966857910156, \"Time in s\": 20.769031 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 19.168820167599023, \"RMSE\": 31.233753579350584, \"R2\": 0.6706197338145063, \"Memory in Mb\": 0.9631462097167968, \"Time in s\": 21.841163 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 19.66691155441429, \"RMSE\": 31.66292403296529, \"R2\": 0.6800549958221269, \"Memory in Mb\": 
0.9797592163085938, \"Time in s\": 22.942597 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 20.1980005296915, \"RMSE\": 32.326142080110245, \"R2\": 0.6862897411402615, \"Memory in Mb\": 1.003559112548828, \"Time in s\": 24.073219 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 21.038101965066165, \"RMSE\": 33.861783151779306, \"R2\": 0.6657814123530009, \"Memory in Mb\": 1.0402488708496094, \"Time in s\": 25.232436 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 21.79336863548733, \"RMSE\": 34.905834748448235, \"R2\": 0.6648549707242759, \"Memory in Mb\": 1.059162139892578, \"Time in s\": 26.421057 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 22.71415913579816, \"RMSE\": 36.0778798798642, \"R2\": 0.6781016272298894, \"Memory in Mb\": 1.080280303955078, \"Time in s\": 27.640294 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 22.973012146363907, \"RMSE\": 36.13494365478664, \"R2\": 0.690416970144236, \"Memory in Mb\": 1.1105842590332031, \"Time in s\": 28.88878 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 23.822482702164027, \"RMSE\": 37.524990725820025, \"R2\": 0.6764299185798421, \"Memory in Mb\": 1.15643310546875, \"Time in s\": 30.16877 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 24.754282528765707, \"RMSE\": 38.80763200620001, \"R2\": 0.669066082535116, \"Memory in Mb\": 1.1719589233398438, \"Time in s\": 31.481567 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"ChickWeights\", \"MAE\": 25.964764492717645, \"RMSE\": 40.6034258569803, \"R2\": 0.6648997528039327, \"Memory in Mb\": 1.186126708984375, \"Time in s\": 32.828648 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.656631863584941, \"RMSE\": 13.301513571178564, \"R2\": -414.0080590272913, \"Memory in Mb\": 0.20159912109375, \"Time in s\": 0.060655 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.3092233454389866, \"RMSE\": 9.514642226769206, \"R2\": -35.395716979334736, \"Memory in Mb\": 0.2895278930664062, \"Time in s\": 0.186458 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.370245548222252, \"RMSE\": 7.779742234417967, \"R2\": -31.80504797847069, \"Memory in Mb\": 0.3228263854980469, \"Time in s\": 0.354007 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0011825545667588, \"RMSE\": 6.767036502792006, \"R2\": -23.897224712465302, \"Memory in Mb\": 0.3692893981933594, \"Time in s\": 0.55542 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.056871489932448, \"RMSE\": 6.139015712379544, \"R2\": -11.856454989718417, \"Memory in Mb\": 0.4076881408691406, \"Time in s\": 0.79515 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Adaptive 
Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.003092266983848, \"RMSE\": 5.651046465030358, \"R2\": -8.241693395555247, \"Memory in Mb\": 0.4241790771484375, \"Time in s\": 1.072178 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8854667406427696, \"RMSE\": 5.2532308662388045, \"R2\": -7.252888139218157, \"Memory in Mb\": 0.4439773559570312, \"Time in s\": 1.385482 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9340845560700957, \"RMSE\": 4.984838031470717, \"R2\": -5.553334350331147, \"Memory in Mb\": 0.4557876586914062, \"Time in s\": 1.738613 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9596878280460808, \"RMSE\": 4.757627458420989, \"R2\": -4.242801539132487, \"Memory in Mb\": 0.4726028442382812, \"Time in s\": 2.128929 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9000266951648128, \"RMSE\": 4.53846274922039, \"R2\": -3.785084131964374, \"Memory in Mb\": 0.5018348693847656, \"Time in s\": 2.555084 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7756062902244436, \"RMSE\": 4.331882031903988, \"R2\": -3.702506681895046, \"Memory in Mb\": 0.5390548706054688, \"Time in s\": 3.014926 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.722064335127714, \"RMSE\": 4.161337926832492, \"R2\": -3.3721758647478204, \"Memory in Mb\": 0.5583267211914062, \"Time in s\": 3.510014 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.668741133448935, \"RMSE\": 4.009095884933904, \"R2\": -3.112800247055813, \"Memory in Mb\": 0.5867843627929688, \"Time in s\": 4.039729 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.596023168515899, \"RMSE\": 3.86837254754146, \"R2\": -3.0090634555073343, \"Memory in Mb\": 0.6034698486328125, \"Time in s\": 4.6068750000000005 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5862781822088614, \"RMSE\": 3.757725401796155, \"R2\": -2.72037165832895, \"Memory in Mb\": 0.6344451904296875, \"Time in s\": 5.213001 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5269819355711358, \"RMSE\": 3.644015209511257, \"R2\": -2.67185104068617, \"Memory in Mb\": 0.6524238586425781, \"Time in s\": 5.854158 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4570659608871932, \"RMSE\": 3.537119081966105, \"R2\": -2.6559352799702967, \"Memory in Mb\": 0.6771697998046875, \"Time in s\": 6.534176 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4064005847783465, \"RMSE\": 3.441784227878547, \"R2\": -2.4899419787597266, \"Memory in Mb\": 0.7120590209960938, \"Time in s\": 7.253771 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.353481677960258, \"RMSE\": 
3.352627624678765, \"R2\": -2.426029812278527, \"Memory in Mb\": 0.7612266540527344, \"Time in s\": 8.012458 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3254238940946812, \"RMSE\": 3.27422063813923, \"R2\": -2.360008106665105, \"Memory in Mb\": 0.7831077575683594, \"Time in s\": 8.809453000000001 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2822507927277378, \"RMSE\": 3.197239276537114, \"R2\": -2.309899038967682, \"Memory in Mb\": 0.8153495788574219, \"Time in s\": 9.645719 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2469389910195894, \"RMSE\": 3.127027882921648, \"R2\": -2.108834809107039, \"Memory in Mb\": 0.8549957275390625, \"Time in s\": 10.520540000000002 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.209007072666545, \"RMSE\": 3.060196802253249, \"R2\": -1.8234356170996369, \"Memory in Mb\": 0.8641319274902344, \"Time in s\": 11.437965000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.173565091899577, \"RMSE\": 2.996986036098964, \"R2\": -1.6456994146583188, \"Memory in Mb\": 0.9048995971679688, \"Time in s\": 12.395296000000002 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1545102154576106, \"RMSE\": 2.9412312685847275, \"R2\": -1.494716293761022, \"Memory in Mb\": 0.9429206848144532, \"Time in s\": 13.397268000000002 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1235341152888954, \"RMSE\": 2.885679937630878, \"R2\": -1.4055626483775598, \"Memory in Mb\": 0.9187583923339844, \"Time in s\": 14.455292000000002 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0931325909773704, \"RMSE\": 2.8325152336083046, \"R2\": -1.3092471918321815, \"Memory in Mb\": 0.9785118103027344, \"Time in s\": 15.562633000000002 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.080423433010604, \"RMSE\": 2.7861157835692825, \"R2\": -1.2874470666919131, \"Memory in Mb\": 0.8416099548339844, \"Time in s\": 16.721025 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0538460644054608, \"RMSE\": 2.73873459117484, \"R2\": -1.2649736085810763, \"Memory in Mb\": 0.9269638061523438, \"Time in s\": 17.931443 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.037762850657022, \"RMSE\": 2.6954140775252133, \"R2\": -1.1696179031883165, \"Memory in Mb\": 1.0152244567871094, \"Time in s\": 19.191903 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.022747434499167, \"RMSE\": 2.654976815796094, \"R2\": -1.0729218181826925, \"Memory in Mb\": 0.968769073486328, \"Time in s\": 20.501534 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.006861222675929, \"RMSE\": 2.615186194972241, 
\"R2\": -0.9736586966592728, \"Memory in Mb\": 0.803070068359375, \"Time in s\": 21.860299 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9869414277387362, \"RMSE\": 2.5762284524749504, \"R2\": -0.9015227260176624, \"Memory in Mb\": 0.7759437561035156, \"Time in s\": 23.260671 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.966244541930286, \"RMSE\": 2.538677410892474, \"R2\": -0.8756731719459285, \"Memory in Mb\": 0.8428535461425781, \"Time in s\": 24.702724 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9583039577617944, \"RMSE\": 2.5060806369029174, \"R2\": -0.8758219540617476, \"Memory in Mb\": 0.9465827941894532, \"Time in s\": 26.183847 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9436352676783604, \"RMSE\": 2.472672874668102, \"R2\": -0.8662636043278715, \"Memory in Mb\": 1.0379486083984375, \"Time in s\": 27.712492 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9279533445219876, \"RMSE\": 2.44025252932111, \"R2\": -0.816552178803118, \"Memory in Mb\": 1.1120071411132812, \"Time in s\": 29.286906 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9160723193407864, \"RMSE\": 2.4102496967776017, \"R2\": -0.7913569678512289, \"Memory in Mb\": 1.17376708984375, \"Time in s\": 30.91286 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8969957702252379, \"RMSE\": 2.379401465215729, \"R2\": -0.753616871824967, \"Memory in Mb\": 1.2616004943847656, \"Time in s\": 32.587144 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8879951793456313, \"RMSE\": 2.351379467833174, \"R2\": -0.7280439441517783, \"Memory in Mb\": 1.355243682861328, \"Time in s\": 34.314383 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8778121160020841, \"RMSE\": 2.323761508818731, \"R2\": -0.7062232791684944, \"Memory in Mb\": 1.4321250915527344, \"Time in s\": 36.09551 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8674818777939237, \"RMSE\": 2.297554956673723, \"R2\": -0.683441607427212, \"Memory in Mb\": 1.4874954223632812, \"Time in s\": 37.948423 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8596582290799093, \"RMSE\": 2.2724190258224723, \"R2\": -0.6439714465644337, \"Memory in Mb\": 1.5595359802246094, \"Time in s\": 39.857894 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8554226716886886, \"RMSE\": 2.248810009475745, \"R2\": -0.601989379910626, \"Memory in Mb\": 1.6191024780273438, \"Time in s\": 41.823765 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8440517106498798, \"RMSE\": 2.2244180426447486, \"R2\": -0.5740310312005918, \"Memory in Mb\": 1.1261253356933594, \"Time in 
s\": 43.8642 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8347159483348575, \"RMSE\": 2.201058546919984, \"R2\": -0.5664676720057789, \"Memory in Mb\": 1.1678504943847656, \"Time in s\": 45.946931000000006 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8262269751007542, \"RMSE\": 2.1790511985851078, \"R2\": -0.5483240613297584, \"Memory in Mb\": 1.119964599609375, \"Time in s\": 48.07255500000001 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8159800926650994, \"RMSE\": 2.1571455676024542, \"R2\": -0.5332153251602936, \"Memory in Mb\": 1.1733894348144531, \"Time in s\": 50.238479000000005 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.80675324923832, \"RMSE\": 2.1360469188887925, \"R2\": -0.5325676025464574, \"Memory in Mb\": 1.2283477783203125, \"Time in s\": 52.4464 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Adaptive Random Forest\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.801132918984696, \"RMSE\": 2.116027645482028, \"R2\": -0.5292923269414216, \"Memory in Mb\": 1.2836189270019531, \"Time in s\": 54.694244000000005 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.664574314574316, \"RMSE\": 12.7079745317607, \"R2\": -206.87879383707747, \"Memory in Mb\": 0.0196142196655273, \"Time in s\": 0.002799 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.767694704637076, \"RMSE\": 9.018587183866767, \"R2\": -85.14025986830408, \"Memory in Mb\": 0.0211782455444335, \"Time in s\": 0.009348 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.3093367298127023, \"RMSE\": 7.420500566500976, \"R2\": -37.24267181629702, \"Memory in Mb\": 0.0263471603393554, \"Time in s\": 0.018276 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 1.892363968348808, \"RMSE\": 6.441521936619904, \"R2\": -31.668094594906044, \"Memory in Mb\": 0.0274343490600585, \"Time in s\": 0.0297909999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.1129412159858934, \"RMSE\": 6.114058653243701, \"R2\": -6.297346571779499, \"Memory in Mb\": 0.0340337753295898, \"Time in s\": 0.044022 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 2.832849782567835, \"RMSE\": 6.236602142425367, \"R2\": -2.2730130120415795, \"Memory in Mb\": 0.043257713317871, \"Time in s\": 0.061336 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4069290990236856, \"RMSE\": 6.402381882180361, \"R2\": -1.3118663438824, \"Memory in Mb\": 0.0494871139526367, \"Time in s\": 0.082289 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 3.650377971160808, \"RMSE\": 6.321189272940957, \"R2\": -1.043267371916866, \"Memory in Mb\": 0.0551328659057617, \"Time in s\": 0.107232 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Adaptive 
Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.035631404360372, \"RMSE\": 6.4483291916176695, \"R2\": -0.7783857772357967, \"Memory in Mb\": 0.0562467575073242, \"Time in s\": 0.136317 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.693189868957898, \"RMSE\": 7.0697740144659305, \"R2\": -0.4927792786841307, \"Memory in Mb\": 0.0576238632202148, \"Time in s\": 0.169599 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.274396860168236, \"RMSE\": 7.6542276724395, \"R2\": -0.3476225254437259, \"Memory in Mb\": 0.0577573776245117, \"Time in s\": 0.206842 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.247611065864998, \"RMSE\": 7.56430675955835, \"R2\": -0.0756815066101803, \"Memory in Mb\": 0.0578107833862304, \"Time in s\": 0.2481519999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.084413296044263, \"RMSE\": 7.343803904848652, \"R2\": 0.1787885014844915, \"Memory in Mb\": 0.058394432067871, \"Time in s\": 0.2937619999999999 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 4.973008915037768, \"RMSE\": 7.173430375731751, \"R2\": 0.3340904988080935, \"Memory in Mb\": 0.0584478378295898, \"Time in s\": 0.343499 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.201973475639973, \"RMSE\": 7.389818367745889, \"R2\": 0.4319135436678196, \"Memory in Mb\": 0.0584478378295898, \"Time in s\": 0.397279 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.377897753885034, \"RMSE\": 7.538080975572278, \"R2\": 0.5233859928595415, \"Memory in Mb\": 0.0590581893920898, \"Time in s\": 0.455134 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.414777515271245, \"RMSE\": 7.541781669769663, \"R2\": 0.6093812059493195, \"Memory in Mb\": 0.0591115951538085, \"Time in s\": 0.517245 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.40059238519783, \"RMSE\": 7.511878220104288, \"R2\": 0.6917410630009373, \"Memory in Mb\": 0.0590581893920898, \"Time in s\": 0.583523 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 5.933708937482518, \"RMSE\": 9.717098931216649, \"R2\": 0.5747798982216288, \"Memory in Mb\": 0.0252752304077148, \"Time in s\": 0.662886 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 6.498767742677896, \"RMSE\": 10.515698120348512, \"R2\": 0.5640139167754625, \"Memory in Mb\": 0.0314245223999023, \"Time in s\": 0.744596 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 6.70504504336628, \"RMSE\": 10.67374680573752, \"R2\": 0.6157790851383267, \"Memory in Mb\": 0.0365400314331054, \"Time in s\": 0.828906 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.118759598962231, \"RMSE\": 11.248237924166032, \"R2\": 
0.6566609789141779, \"Memory in Mb\": 0.0410718917846679, \"Time in s\": 0.916125 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.339750662382254, \"RMSE\": 11.39871112384624, \"R2\": 0.6937739359440155, \"Memory in Mb\": 0.0445966720581054, \"Time in s\": 1.006337 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 7.866457552558359, \"RMSE\": 12.301057885719082, \"R2\": 0.6741619364384577, \"Memory in Mb\": 0.0447034835815429, \"Time in s\": 1.099545 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 8.421243223038738, \"RMSE\": 13.285884557144795, \"R2\": 0.6655331879845522, \"Memory in Mb\": 0.0447034835815429, \"Time in s\": 1.19574 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 8.956033363560122, \"RMSE\": 14.109625220244896, \"R2\": 0.6732762786849746, \"Memory in Mb\": 0.0452070236206054, \"Time in s\": 1.295357 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 9.573413802719209, \"RMSE\": 14.887232530340055, \"R2\": 0.6845415314559343, \"Memory in Mb\": 0.0452337265014648, \"Time in s\": 1.398184 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 10.140823162094344, \"RMSE\": 15.798657858475249, \"R2\": 0.6835926538539661, \"Memory in Mb\": 0.0452604293823242, \"Time in s\": 1.504001 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.05646041165176, \"RMSE\": 17.826108509419473, \"R2\": 0.6344480842540857, \"Memory in Mb\": 0.0452604293823242, \"Time in s\": 1.6128270000000002 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.706749123156325, \"RMSE\": 18.647901518188576, \"R2\": 0.6576594092850414, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.7247210000000002 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.849547188265053, \"RMSE\": 18.683751607733637, \"R2\": 0.6922705096711406, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.839831 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 11.96088648820193, \"RMSE\": 18.74329807265456, \"R2\": 0.7184745225672177, \"Memory in Mb\": 0.0452871322631835, \"Time in s\": 1.958081 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 12.783089048199372, \"RMSE\": 19.95838853158221, \"R2\": 0.6900311672733117, \"Memory in Mb\": 0.0453138351440429, \"Time in s\": 2.0793790000000003 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.27307991721093, \"RMSE\": 20.988857849066505, \"R2\": 0.6848686892374445, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.203704 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.623649100869688, \"RMSE\": 21.545378780740656, \"R2\": 0.7062071700264252, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.331151 }, { \"step\": 396, 
\"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 13.714864044781413, \"RMSE\": 21.4916185882578, \"R2\": 0.7274352207736796, \"Memory in Mb\": 0.0453405380249023, \"Time in s\": 2.4617500000000003 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 14.57407318940339, \"RMSE\": 22.90334645043852, \"R2\": 0.712266679293069, \"Memory in Mb\": 0.0456800460815429, \"Time in s\": 2.5991990000000005 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.311297276648313, \"RMSE\": 24.25392212062312, \"R2\": 0.6971079497894322, \"Memory in Mb\": 0.0457334518432617, \"Time in s\": 2.7396560000000005 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.833945440380871, \"RMSE\": 25.12811892959106, \"R2\": 0.7110893860103431, \"Memory in Mb\": 0.0457334518432617, \"Time in s\": 2.8832220000000004 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 15.995632485589844, \"RMSE\": 25.20571130808328, \"R2\": 0.7298778762133054, \"Memory in Mb\": 0.0456533432006835, \"Time in s\": 3.0299210000000003 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 16.482571154231422, \"RMSE\": 25.77399383544894, \"R2\": 0.7295670550023294, \"Memory in Mb\": 0.0461835861206054, \"Time in s\": 3.179584 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 17.556958821758087, \"RMSE\": 27.82207110996992, \"R2\": 0.7040173234911381, \"Memory in Mb\": 0.0469274520874023, \"Time in s\": 3.3322830000000003 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 18.31809908164516, \"RMSE\": 29.103026344234387, \"R2\": 0.7140266770057507, \"Memory in Mb\": 0.046980857849121, \"Time in s\": 3.488055 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 18.645508344467764, \"RMSE\": 29.39095020592674, \"R2\": 0.7243229903014706, \"Memory in Mb\": 0.0469541549682617, \"Time in s\": 3.646969 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 19.076944683969508, \"RMSE\": 29.900823849283483, \"R2\": 0.7315970559213162, \"Memory in Mb\": 0.0469007492065429, \"Time in s\": 3.808963 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 19.9412049113122, \"RMSE\": 31.299098765867257, \"R2\": 0.7144549629170223, \"Memory in Mb\": 0.046980857849121, \"Time in s\": 3.97409 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 20.652539482762663, \"RMSE\": 32.28122969713156, \"R2\": 0.7133599532452177, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.142383000000001 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 21.437431132207102, \"RMSE\": 33.471760575953454, \"R2\": 0.7229272102715563, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.313758000000001 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Adaptive Model 
Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 21.589008276225865, \"RMSE\": 33.459905509370415, \"R2\": 0.7345566785536845, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.488246000000001 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 22.551700866868885, \"RMSE\": 35.03737693702089, \"R2\": 0.717908278090669, \"Memory in Mb\": 0.0470342636108398, \"Time in s\": 4.665796000000001 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 23.243872726229487, \"RMSE\": 35.949191367533466, \"R2\": 0.7160216409608307, \"Memory in Mb\": 0.0470075607299804, \"Time in s\": 4.846464000000001 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"ChickWeights\", \"MAE\": 24.092513885911806, \"RMSE\": 37.13693189688246, \"R2\": 0.7196752558485364, \"Memory in Mb\": 0.0469541549682617, \"Time in s\": 5.030284000000001 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695184981652336, \"RMSE\": 9.807184976514188, \"R2\": -224.6021011118197, \"Memory in Mb\": 0.0538091659545898, \"Time in s\": 0.005953 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3994713447037435, \"RMSE\": 7.102066178895935, \"R2\": -19.27845129783118, \"Memory in Mb\": 0.0761518478393554, \"Time in s\": 0.016156 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8170744682035584, \"RMSE\": 5.815253847056423, \"R2\": -17.329373299766118, \"Memory in Mb\": 0.0883970260620117, \"Time in s\": 0.0302489999999999 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.604995404573344, \"RMSE\": 5.081770494168446, \"R2\": -13.040545957103586, \"Memory in Mb\": 0.0980443954467773, \"Time in s\": 0.0484799999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824259078948539, \"RMSE\": 4.70488333223354, \"R2\": -6.5512954222403845, \"Memory in Mb\": 0.1071348190307617, \"Time in s\": 0.071134 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918744608116588, \"RMSE\": 4.412336880489357, \"R2\": -4.634185300646759, \"Memory in Mb\": 0.1113233566284179, \"Time in s\": 0.098322 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8761207739327503, \"RMSE\": 4.13187920011476, \"R2\": -4.105616799680584, \"Memory in Mb\": 0.1133375167846679, \"Time in s\": 0.13009 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.961232939518506, \"RMSE\": 3.976173487274506, \"R2\": -3.1695661963674864, \"Memory in Mb\": 0.1174459457397461, \"Time in s\": 0.166507 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.066134597500757, \"RMSE\": 3.873731518767916, \"R2\": -2.4756944369169624, \"Memory in Mb\": 0.1194601058959961, \"Time in s\": 0.207686 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051125997923389, 
\"RMSE\": 3.731810291394655, \"R2\": -2.23527456693896, \"Memory in Mb\": 0.0176219940185546, \"Time in s\": 0.262943 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.0738811328897206, \"RMSE\": 4.417664564856108, \"R2\": -3.890594467356201, \"Memory in Mb\": 0.0358037948608398, \"Time in s\": 0.32065 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9726100065438288, \"RMSE\": 4.237524240975239, \"R2\": -3.5337340888030546, \"Memory in Mb\": 0.0415029525756835, \"Time in s\": 0.38109 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8594315384151243, \"RMSE\": 4.074751007989252, \"R2\": -3.248610147038553, \"Memory in Mb\": 0.0488462448120117, \"Time in s\": 0.444412 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7773205119132678, \"RMSE\": 3.936654153117972, \"R2\": -3.1518424972300867, \"Memory in Mb\": 0.0637922286987304, \"Time in s\": 0.510957 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8265705896173516, \"RMSE\": 3.8591002097544127, \"R2\": -2.923813511442849, \"Memory in Mb\": 0.0735006332397461, \"Time in s\": 0.581026 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7437620649419607, \"RMSE\": 3.7394874649640353, \"R2\": -2.8667745903740336, \"Memory in Mb\": 0.0810804367065429, \"Time in s\": 0.6546050000000001 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7029951846067328, \"RMSE\": 3.640753753244776, \"R2\": -2.873305462857122, \"Memory in Mb\": 0.0861959457397461, \"Time in s\": 0.7321480000000001 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.691125588823449, \"RMSE\": 3.557868621003357, \"R2\": -2.729329365262769, \"Memory in Mb\": 0.0937833786010742, \"Time in s\": 0.8137190000000001 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.641476039217788, \"RMSE\": 3.4678199943963026, \"R2\": -2.665503107324644, \"Memory in Mb\": 0.0988988876342773, \"Time in s\": 0.8994270000000001 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6260112424669562, \"RMSE\": 3.3952504336469187, \"R2\": -2.613000890937967, \"Memory in Mb\": 0.1061162948608398, \"Time in s\": 0.989373 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6289201270983786, \"RMSE\": 3.3343146523246907, \"R2\": -2.599793842225358, \"Memory in Mb\": 0.1101179122924804, \"Time in s\": 1.08339 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.667060123646852, \"RMSE\": 3.302206999442347, \"R2\": -2.466911261860751, \"Memory in Mb\": 0.1157636642456054, \"Time in s\": 1.1816330000000002 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.69667104754334, \"RMSE\": 3.2720484626443, \"R2\": -2.2278892819413008, \"Memory in Mb\": 
0.1238470077514648, \"Time in s\": 1.2846220000000002 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6506098779434175, \"RMSE\": 3.2067821053781245, \"R2\": -2.029074572324324, \"Memory in Mb\": 0.1315069198608398, \"Time in s\": 1.3924660000000002 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6365240614669594, \"RMSE\": 3.1603547309397078, \"R2\": -1.8802784791951508, \"Memory in Mb\": 0.0784368515014648, \"Time in s\": 1.505205 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6536721067944389, \"RMSE\": 3.126560253923372, \"R2\": -1.823930193625598, \"Memory in Mb\": 0.0835790634155273, \"Time in s\": 1.621512 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6698160029512246, \"RMSE\": 3.0946441969309766, \"R2\": -1.7564325082786318, \"Memory in Mb\": 0.0873861312866211, \"Time in s\": 1.745676 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6408841434358417, \"RMSE\": 3.046586581366264, \"R2\": -1.735141389893172, \"Memory in Mb\": 0.0885534286499023, \"Time in s\": 1.8735 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6127327645791831, \"RMSE\": 2.999611374258061, \"R2\": -1.7170225021123482, \"Memory in Mb\": 0.0890569686889648, \"Time in s\": 2.004994 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6269498006919805, \"RMSE\": 2.973082395326553, \"R2\": -1.6396488808638732, \"Memory in Mb\": 0.0909147262573242, \"Time in s\": 2.140229 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.64112955570108, \"RMSE\": 2.949075530135499, \"R2\": -1.5576036781852802, \"Memory in Mb\": 0.0923452377319336, \"Time in s\": 2.27922 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6562657927450175, \"RMSE\": 2.9273758724267736, \"R2\": -1.4729982020585646, \"Memory in Mb\": 0.0944395065307617, \"Time in s\": 2.422009 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6610090165740414, \"RMSE\": 2.900076441293188, \"R2\": -1.409637238697782, \"Memory in Mb\": 0.0960302352905273, \"Time in s\": 2.568661 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.640070345532056, \"RMSE\": 2.8623424740678667, \"R2\": -1.3844340745604549, \"Memory in Mb\": 0.0981245040893554, \"Time in s\": 2.7190639999999995 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6119603204138224, \"RMSE\": 2.8240252200668348, \"R2\": -1.381983091116742, \"Memory in Mb\": 0.1015691757202148, \"Time in s\": 2.8733429999999998 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.589173412563986, \"RMSE\": 2.788316481605285, \"R2\": -1.3731423466582644, \"Memory in Mb\": 0.1027364730834961, \"Time in s\": 3.031541 }, { \"step\": 740, \"track\": 
\"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5872474989945902, \"RMSE\": 2.762320631839069, \"R2\": -1.3276973292362433, \"Memory in Mb\": 0.1038503646850586, \"Time in s\": 3.198810999999999 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.573860293324891, \"RMSE\": 2.731605449949154, \"R2\": -1.3008801881813965, \"Memory in Mb\": 0.1038503646850586, \"Time in s\": 3.3700799999999997 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5672492734296428, \"RMSE\": 2.7047187411026274, \"R2\": -1.2659143323804294, \"Memory in Mb\": 0.1064214706420898, \"Time in s\": 3.5453349999999992 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5527653312522924, \"RMSE\": 2.676901954415756, \"R2\": -1.2396196471003753, \"Memory in Mb\": 0.1070318222045898, \"Time in s\": 3.7247 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5366430278321572, \"RMSE\": 2.648967131435787, \"R2\": -1.2172052322327516, \"Memory in Mb\": 0.1085958480834961, \"Time in s\": 3.908095 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5234128351461855, \"RMSE\": 2.622526466573217, \"R2\": -1.1933402061449063, \"Memory in Mb\": 0.1101598739624023, \"Time in s\": 4.095543999999999 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.520997799745444, \"RMSE\": 2.601206078568585, \"R2\": -1.1541054380753062, \"Memory in Mb\": 0.1107702255249023, \"Time in s\": 4.287108999999999 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4988763963538276, \"RMSE\": 2.573388458013685, \"R2\": -1.0978034673380694, \"Memory in Mb\": 0.1112470626831054, \"Time in s\": 4.482816999999999 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4758663089418147, \"RMSE\": 2.54594713286987, \"R2\": -1.061955243276925, \"Memory in Mb\": 0.1116437911987304, \"Time in s\": 4.682644999999999 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.462937696730923, \"RMSE\": 2.523979038782575, \"R2\": -1.059822201667401, \"Memory in Mb\": 0.1116704940795898, \"Time in s\": 4.886850999999999 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4561394845136584, \"RMSE\": 2.50519739840106, \"R2\": -1.0464959899330828, \"Memory in Mb\": 0.1127042770385742, \"Time in s\": 5.100838999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4393535919618172, \"RMSE\": 2.483254475687026, \"R2\": -1.0318268701719249, \"Memory in Mb\": 0.1132078170776367, \"Time in s\": 5.318975999999998 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4209594300543067, \"RMSE\": 2.4596960058574417, \"R2\": -1.0321742156649796, \"Memory in Mb\": 0.1138181686401367, \"Time in s\": 5.541285999999999 }, { \"step\": 1000, \"track\": 
\"Regression\", \"model\": \"Adaptive Model Rules\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4020445673510784, \"RMSE\": 2.4364355463770164, \"R2\": -1.027485181852556, \"Memory in Mb\": 0.1144285202026367, \"Time in s\": 5.767785999999998 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.674710287324511, \"RMSE\": 12.709622005759083, \"R2\": -206.93269654300337, \"Memory in Mb\": 0.1438665390014648, \"Time in s\": 0.043578 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.741934273684416, \"RMSE\": 9.017856101646904, \"R2\": -85.12629469646626, \"Memory in Mb\": 0.1680784225463867, \"Time in s\": 0.114891 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.321314094852809, \"RMSE\": 7.424021720293775, \"R2\": -37.27897402435965, \"Memory in Mb\": 0.2096052169799804, \"Time in s\": 0.217395 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 1.9425031371298072, \"RMSE\": 6.446443185481759, \"R2\": -31.71802976156788, \"Memory in Mb\": 0.2417478561401367, \"Time in s\": 0.352059 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.220127898780405, \"RMSE\": 6.120501061993398, \"R2\": -6.312733162160137, \"Memory in Mb\": 0.3060827255249023, \"Time in s\": 0.516621 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.329752126186388, \"RMSE\": 5.733717860182345, \"R2\": -1.7664593315707076, \"Memory in Mb\": 0.3567266464233398, \"Time in s\": 0.719974 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.702798931003846, \"RMSE\": 5.8295610878248265, \"R2\": -0.9166874006339528, \"Memory in Mb\": 0.3732900619506836, \"Time in s\": 0.96062 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.6099619817757915, \"RMSE\": 5.526618942218035, \"R2\": -0.5618763668879856, \"Memory in Mb\": 0.4128637313842773, \"Time in s\": 1.239929 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.746820956366501, \"RMSE\": 5.433915350818854, \"R2\": -0.2628661224764999, \"Memory in Mb\": 0.4623746871948242, \"Time in s\": 1.563224 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 2.8880448046427323, \"RMSE\": 5.393209741308231, \"R2\": 0.131281330475993, \"Memory in Mb\": 0.5318593978881836, \"Time in s\": 1.933351 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.102793618966865, \"RMSE\": 5.4667968998241765, \"R2\": 0.312565398150165, \"Memory in Mb\": 0.5604543685913086, \"Time in s\": 2.356966 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.301393875206229, \"RMSE\": 5.777687225912497, \"R2\": 0.3724425463943938, \"Memory in Mb\": 0.1946859359741211, \"Time in s\": 2.834547 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", 
\"MAE\": 3.2818761969220303, \"RMSE\": 5.651296511520541, \"R2\": 0.5136946280960023, \"Memory in Mb\": 0.2288389205932617, \"Time in s\": 3.343438 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.2515588511167266, \"RMSE\": 5.538190901897615, \"R2\": 0.6030852117170243, \"Memory in Mb\": 0.2577199935913086, \"Time in s\": 3.884102 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.5490619378471253, \"RMSE\": 5.901472315915889, \"R2\": 0.6377005660204649, \"Memory in Mb\": 0.2627325057983398, \"Time in s\": 4.460716 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.748099204740288, \"RMSE\": 6.114989482363781, \"R2\": 0.6863562530103127, \"Memory in Mb\": 0.2941198348999023, \"Time in s\": 5.073099 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 3.855321751115436, \"RMSE\": 6.172883744230479, \"R2\": 0.7383134393857906, \"Memory in Mb\": 0.3320951461791992, \"Time in s\": 5.716163 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.064611732985858, \"RMSE\": 6.634154210053631, \"R2\": 0.7595694090278171, \"Memory in Mb\": 0.2527418136596679, \"Time in s\": 6.400463 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.2854283677061895, \"RMSE\": 7.034301545827039, \"R2\": 0.7771654635449772, \"Memory in Mb\": 0.3202161788940429, \"Time in s\": 7.116785 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 4.87812017832838, \"RMSE\": 8.464168223538273, \"R2\": 0.7175347810295998, \"Memory in Mb\": 0.3510808944702148, \"Time in s\": 7.873189 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.139510271881338, \"RMSE\": 8.709209118107985, \"R2\": 0.7441975817879227, \"Memory in Mb\": 0.3200139999389648, \"Time in s\": 8.671972 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.512568989147682, \"RMSE\": 9.214318878962654, \"R2\": 0.7696009669773758, \"Memory in Mb\": 0.3710927963256836, \"Time in s\": 9.508629 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 5.61462831814645, \"RMSE\": 9.300369065456374, \"R2\": 0.7961404678287027, \"Memory in Mb\": 0.4192609786987304, \"Time in s\": 10.381303 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 6.307820522062941, \"RMSE\": 10.632794713133398, \"R2\": 0.7565488951511344, \"Memory in Mb\": 0.3416013717651367, \"Time in s\": 11.288641 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 6.788015101176612, \"RMSE\": 11.834244259749068, \"R2\": 0.7346291964126817, \"Memory in Mb\": 0.3569021224975586, \"Time in s\": 12.231102 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.083325494446596, \"RMSE\": 12.279756566760542, \"R2\": 0.7525262008661281, 
\"Memory in Mb\": 0.3965520858764648, \"Time in s\": 13.20812 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.191320030958258, \"RMSE\": 12.343414248948324, \"R2\": 0.7831373025283351, \"Memory in Mb\": 0.4258260726928711, \"Time in s\": 14.218151 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 7.797209174725968, \"RMSE\": 13.278742843330225, \"R2\": 0.7764780945249113, \"Memory in Mb\": 0.4501142501831054, \"Time in s\": 15.260696 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 8.817827198046418, \"RMSE\": 15.9696940597815, \"R2\": 0.7066209046531168, \"Memory in Mb\": 0.4718656539916992, \"Time in s\": 16.339578 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.357743221083844, \"RMSE\": 16.830542617885925, \"R2\": 0.7211345584533546, \"Memory in Mb\": 0.3236379623413086, \"Time in s\": 17.459623999999998 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.597227793355117, \"RMSE\": 16.978620563243194, \"R2\": 0.7458759585671038, \"Memory in Mb\": 0.3676939010620117, \"Time in s\": 18.612921 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 9.7373515968392, \"RMSE\": 17.046818432657442, \"R2\": 0.7671306392320572, \"Memory in Mb\": 0.3629522323608398, \"Time in s\": 19.795535999999995 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 10.474784553852622, \"RMSE\": 18.420082712462506, \"R2\": 0.7359718490826586, \"Memory in Mb\": 0.3571195602416992, \"Time in s\": 21.012742 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.136965297216438, \"RMSE\": 20.001694167276444, \"R2\": 0.7138145773791849, \"Memory in Mb\": 0.3998785018920898, \"Time in s\": 22.263945 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.663755213802826, \"RMSE\": 20.72310945995512, \"R2\": 0.7282041838356077, \"Memory in Mb\": 0.3288450241088867, \"Time in s\": 23.553922 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 11.794892571100942, \"RMSE\": 20.71536814309967, \"R2\": 0.7467690419180344, \"Memory in Mb\": 0.4024953842163086, \"Time in s\": 24.874665 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 12.7361976076631, \"RMSE\": 22.396840643020628, \"R2\": 0.7248523596518419, \"Memory in Mb\": 0.4475545883178711, \"Time in s\": 26.248019 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 13.669165628181592, \"RMSE\": 24.17119440887557, \"R2\": 0.6991706950778176, \"Memory in Mb\": 0.4968290328979492, \"Time in s\": 27.657996 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.21091951782044, \"RMSE\": 24.907688885339496, \"R2\": 0.7161359437094021, \"Memory in Mb\": 0.5376424789428711, \"Time in s\": 
29.100111 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.453337266265764, \"RMSE\": 25.002813963201465, \"R2\": 0.7342091543116155, \"Memory in Mb\": 0.5657072067260742, \"Time in s\": 30.577019 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 14.795462221424788, \"RMSE\": 25.396591749368834, \"R2\": 0.7374288341656263, \"Memory in Mb\": 0.2583265304565429, \"Time in s\": 32.092868 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 16.121556592117344, \"RMSE\": 28.147488646444906, \"R2\": 0.6970529793362255, \"Memory in Mb\": 0.3047628402709961, \"Time in s\": 33.634783000000006 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 16.919755395139642, \"RMSE\": 29.31210430009691, \"R2\": 0.7099030173412568, \"Memory in Mb\": 0.3544912338256836, \"Time in s\": 35.20760200000001 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 17.222944705236912, \"RMSE\": 29.519121223412064, \"R2\": 0.7219133474565596, \"Memory in Mb\": 0.3983259201049804, \"Time in s\": 36.808400000000006 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 17.875632407347712, \"RMSE\": 30.34841895302658, \"R2\": 0.7235012910446976, \"Memory in Mb\": 0.4324254989624023, \"Time in s\": 38.43633400000001 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 18.888217233083253, \"RMSE\": 31.835585198521954, \"R2\": 0.7045822227904736, \"Memory in Mb\": 0.4678411483764648, \"Time in s\": 40.09290200000001 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 19.824673890348247, \"RMSE\": 33.232962754926376, \"R2\": 0.6962090391160322, \"Memory in Mb\": 0.4975500106811523, \"Time in s\": 41.78457100000001 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 20.55396214529221, \"RMSE\": 34.09704280994323, \"R2\": 0.712478586733055, \"Memory in Mb\": 0.5351285934448242, \"Time in s\": 43.50851800000001 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 20.794926737672743, \"RMSE\": 34.209729391662115, \"R2\": 0.7225264054992676, \"Memory in Mb\": 0.576685905456543, \"Time in s\": 45.26814300000001 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 21.840113545070352, \"RMSE\": 36.13121679398973, \"R2\": 0.7000199686285788, \"Memory in Mb\": 0.4753904342651367, \"Time in s\": 47.07007900000001 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 22.59682566601873, \"RMSE\": 37.08324430393829, \"R2\": 0.6978222813826447, \"Memory in Mb\": 0.5189352035522461, \"Time in s\": 48.90757000000001 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"ChickWeights\", \"MAE\": 23.516206925607506, \"RMSE\": 38.207190389030345, \"R2\": 0.703284935102584, \"Memory in Mb\": 0.5585355758666992, 
\"Time in s\": 50.782863000000006 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.677140920600926, \"RMSE\": 9.804891856735376, \"R2\": -224.4966127051096, \"Memory in Mb\": 0.2373647689819336, \"Time in s\": 0.10297 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.227777813220112, \"RMSE\": 7.083306817310631, \"R2\": -19.171465983096805, \"Memory in Mb\": 0.3270711898803711, \"Time in s\": 0.289755 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.615301860635012, \"RMSE\": 5.7908261762165685, \"R2\": -17.175707266102673, \"Memory in Mb\": 0.3493108749389648, \"Time in s\": 0.561259 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3541232617236425, \"RMSE\": 5.026566774725167, \"R2\": -12.73715546617699, \"Memory in Mb\": 0.3968191146850586, \"Time in s\": 0.918423 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2177336486572592, \"RMSE\": 4.516233376106315, \"R2\": -5.957872973758095, \"Memory in Mb\": 0.4107885360717773, \"Time in s\": 1.362708 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1154353306650455, \"RMSE\": 4.135716354504269, \"R2\": -3.949887064372274, \"Memory in Mb\": 0.4562673568725586, \"Time in s\": 1.881933 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2694693870086102, \"RMSE\": 4.857381406328817, \"R2\": -6.05598089151511, \"Memory in Mb\": 0.2026891708374023, \"Time in s\": 2.479905 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.197729670480587, \"RMSE\": 4.553187715474859, \"R2\": -4.467532002416268, \"Memory in Mb\": 0.3215742111206054, \"Time in s\": 3.138645 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1382354235034091, \"RMSE\": 4.302111654125532, \"R2\": -3.2869252353070264, \"Memory in Mb\": 0.4017667770385742, \"Time in s\": 3.869636 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0619299314905082, \"RMSE\": 4.083716699099621, \"R2\": -2.874210730330613, \"Memory in Mb\": 0.4772901535034179, \"Time in s\": 4.674132999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.009898171671029, \"RMSE\": 3.898715068147509, \"R2\": -2.8090719143731837, \"Memory in Mb\": 0.5200605392456055, \"Time in s\": 5.552957999999999 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9646445055554522, \"RMSE\": 3.736185930434801, \"R2\": -2.5244277699140394, \"Memory in Mb\": 0.590418815612793, \"Time in s\": 6.501358999999999 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9172332240820484, \"RMSE\": 3.59133014967386, \"R2\": -2.300314999970906, \"Memory in Mb\": 0.677699089050293, \"Time in s\": 7.522147999999999 }, { 
\"step\": 280, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8713372213927624, \"RMSE\": 3.461793231901636, \"R2\": -2.2106176891844487, \"Memory in Mb\": 0.7459287643432617, \"Time in s\": 8.615362 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8589405406556339, \"RMSE\": 3.350216865329824, \"R2\": -1.9572094631572516, \"Memory in Mb\": 0.8300580978393555, \"Time in s\": 9.791068 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8332253596869799, \"RMSE\": 3.2462964384589745, \"R2\": -1.91407713381932, \"Memory in Mb\": 0.8134641647338867, \"Time in s\": 11.051061 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8452804825423809, \"RMSE\": 3.1781241500612394, \"R2\": -1.9514868559348704, \"Memory in Mb\": 0.7938528060913086, \"Time in s\": 12.398121 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8633895166787324, \"RMSE\": 3.1040171399305283, \"R2\": -1.8385669899509445, \"Memory in Mb\": 0.8660383224487305, \"Time in s\": 13.839894 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8513159306308952, \"RMSE\": 3.0258511220091995, \"R2\": -1.790715802132521, \"Memory in Mb\": 0.930495262145996, \"Time in s\": 15.372487 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8510161564157176, \"RMSE\": 2.9641796059686003, \"R2\": -1.7538068241726803, \"Memory in Mb\": 0.9046812057495116, \"Time in s\": 17.004842 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8518447067444684, \"RMSE\": 2.91289276041272, \"R2\": -1.747346648183163, \"Memory in Mb\": 0.2984609603881836, \"Time in s\": 18.737382 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8395925714721912, \"RMSE\": 2.8496340177457244, \"R2\": -1.5817388594042834, \"Memory in Mb\": 0.3453207015991211, \"Time in s\": 20.534513 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8276812048623222, \"RMSE\": 2.790693875069675, \"R2\": -1.3480296319058032, \"Memory in Mb\": 0.3807516098022461, \"Time in s\": 22.405168 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.8089244758990292, \"RMSE\": 2.7333778802045163, \"R2\": -1.2007484949677454, \"Memory in Mb\": 0.4482488632202148, \"Time in s\": 24.346508 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7928829832001318, \"RMSE\": 2.679994202833863, \"R2\": -1.0712404739849797, \"Memory in Mb\": 0.4975194931030273, \"Time in s\": 26.364916 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7968627360462042, \"RMSE\": 2.655906961990266, \"R2\": -1.037727311679443, \"Memory in Mb\": 0.3753881454467773, \"Time in s\": 28.465481 }, { \"step\": 540, \"track\": \"Regression\", 
\"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7742648164612971, \"RMSE\": 2.606576025481956, \"R2\": -0.9555400217068246, \"Memory in Mb\": 0.4063673019409179, \"Time in s\": 30.635854 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7686196547282114, \"RMSE\": 2.5632583130611786, \"R2\": -0.9361432250468024, \"Memory in Mb\": 0.4374494552612304, \"Time in s\": 32.873421 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7604339230100455, \"RMSE\": 2.522208925126442, \"R2\": -0.9209911676265596, \"Memory in Mb\": 0.4967718124389648, \"Time in s\": 35.174013 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7505205215384793, \"RMSE\": 2.4816305596178174, \"R2\": -0.839105014250833, \"Memory in Mb\": 0.569575309753418, \"Time in s\": 37.541938 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7415649894603582, \"RMSE\": 2.4434454055931187, \"R2\": -0.7557664113175795, \"Memory in Mb\": 0.6584272384643555, \"Time in s\": 39.976025 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7318408439896782, \"RMSE\": 2.406886195682516, \"R2\": -0.6717754169622376, \"Memory in Mb\": 0.7120962142944336, \"Time in s\": 42.479937 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7165688958291918, \"RMSE\": 2.37061113672817, \"R2\": -0.6101020988109156, \"Memory in Mb\": 0.8089780807495117, \"Time in s\": 45.058017 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7019759043195819, \"RMSE\": 2.336022363731008, \"R2\": -0.5881668313132071, \"Memory in Mb\": 0.8398160934448242, \"Time in s\": 47.720249 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.7046161229979929, \"RMSE\": 2.307765546298159, \"R2\": -0.5906876272832315, \"Memory in Mb\": 0.9275884628295898, \"Time in s\": 50.471651 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.699791115281057, \"RMSE\": 2.2802792890719745, \"R2\": -0.5871418479715558, \"Memory in Mb\": 0.9087285995483398, \"Time in s\": 53.310919 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6993154625867034, \"RMSE\": 2.2532668970074776, \"R2\": -0.5488294820654547, \"Memory in Mb\": 0.8719320297241211, \"Time in s\": 56.236366 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6904813287783395, \"RMSE\": 2.224703478329614, \"R2\": -0.5261679577762028, \"Memory in Mb\": 0.929518699645996, \"Time in s\": 59.248631 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6809571474431378, \"RMSE\": 2.1968956754751456, \"R2\": -0.4949206250779803, \"Memory in Mb\": 1.0446271896362305, \"Time in s\": 62.34821899999999 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Streaming Random 
Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6801944660295238, \"RMSE\": 2.1726014141558028, \"R2\": -0.4752630155166446, \"Memory in Mb\": 1.1031560897827148, \"Time in s\": 65.54039399999999 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6800251543540311, \"RMSE\": 2.1538267014914845, \"R2\": -0.4657984869085023, \"Memory in Mb\": 1.0328702926635742, \"Time in s\": 68.82165799999999 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6756860923792902, \"RMSE\": 2.1342936501586, \"R2\": -0.4526954783446735, \"Memory in Mb\": 0.7475957870483398, \"Time in s\": 72.18837399999998 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6733560632024954, \"RMSE\": 2.111426905397114, \"R2\": -0.4192847599850244, \"Memory in Mb\": 0.8119535446166992, \"Time in s\": 75.62699399999998 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6699617254268444, \"RMSE\": 2.0889651464935617, \"R2\": -0.3823451381863831, \"Memory in Mb\": 0.8655576705932617, \"Time in s\": 79.14156799999998 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6641763033522209, \"RMSE\": 2.070781028281848, \"R2\": -0.3641082228169974, \"Memory in Mb\": 0.8467855453491211, \"Time in s\": 82.73512999999998 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6581948551182502, \"RMSE\": 2.04921868376074, \"R2\": -0.3577970506072152, \"Memory in Mb\": 0.9400205612182616, \"Time in s\": 86.40810799999998 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6518145768911946, \"RMSE\": 2.0285221107553344, \"R2\": -0.3417959776408932, \"Memory in Mb\": 0.8168668746948242, \"Time in s\": 90.15720199999998 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6459384626205039, \"RMSE\": 2.0083339154310043, \"R2\": -0.328972794111009, \"Memory in Mb\": 0.9120321273803712, \"Time in s\": 93.98330699999998 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6434185155449816, \"RMSE\": 1.989534246313576, \"R2\": -0.3295384550375804, \"Memory in Mb\": 0.9782476425170898, \"Time in s\": 97.888458 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Streaming Random Patches\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.6405613133684143, \"RMSE\": 1.9713432953350207, \"R2\": -0.3273099653285773, \"Memory in Mb\": 1.0593442916870115, \"Time in s\": 101.872577 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.961585696594296, \"RMSE\": 17.742218537059934, \"R2\": -404.203665453531, \"Memory in Mb\": 0.1782550811767578, \"Time in s\": 0.00837 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.889263372306967, \"RMSE\": 12.56756036562654, \"R2\": -166.2750319368382, \"Memory in Mb\": 0.1910533905029297, \"Time in s\": 0.024461 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": 
\"ChickWeights\", \"MAE\": 4.385855416063054, \"RMSE\": 10.300574999811516, \"R2\": -72.68935558725912, \"Memory in Mb\": 0.2280941009521484, \"Time in s\": 0.04853 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.447994945519213, \"RMSE\": 8.931729546653537, \"R2\": -61.80843214992921, \"Memory in Mb\": 0.2438068389892578, \"Time in s\": 0.081553 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.35253400433443, \"RMSE\": 8.24824849102831, \"R2\": -12.280953122644297, \"Memory in Mb\": 0.2916851043701172, \"Time in s\": 0.123768 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 3.890626377385135, \"RMSE\": 8.046318086318358, \"R2\": -4.448112248710658, \"Memory in Mb\": 0.3612346649169922, \"Time in s\": 0.176552 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.3386080839549335, \"RMSE\": 7.968508799771546, \"R2\": -2.581242138287016, \"Memory in Mb\": 0.4169635772705078, \"Time in s\": 0.2426829999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.490657725713863, \"RMSE\": 7.741140850532905, \"R2\": -2.064344233357061, \"Memory in Mb\": 0.4634113311767578, \"Time in s\": 0.322916 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.7848487383098295, \"RMSE\": 7.706963997319479, \"R2\": -1.5403773740733393, \"Memory in Mb\": 0.4844684600830078, \"Time in s\": 0.418203 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.819508170706054, \"RMSE\": 7.525876668153812, \"R2\": -0.6916040970280823, \"Memory in Mb\": 0.5139484405517578, \"Time in s\": 0.528794 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.831868630589585, \"RMSE\": 7.4067264415397105, \"R2\": -0.261880180240535, \"Memory in Mb\": 0.5271091461181641, \"Time in s\": 0.654758 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.689813959013141, \"RMSE\": 7.185628463196342, \"R2\": 0.0293225226637304, \"Memory in Mb\": 0.5355319976806641, \"Time in s\": 0.796727 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.575735548989308, \"RMSE\": 6.988996035323699, \"R2\": 0.2562234762018912, \"Memory in Mb\": 0.5445156097412109, \"Time in s\": 0.954849 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.5047654468276, \"RMSE\": 6.840884466051986, \"R2\": 0.3943998861813385, \"Memory in Mb\": 0.5490932464599609, \"Time in s\": 1.128949 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.788922390491115, \"RMSE\": 7.112527411058041, \"R2\": 0.4737467226899117, \"Memory in Mb\": 0.5519008636474609, \"Time in s\": 1.318602 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 4.970710142849908, \"RMSE\": 7.252656159084941, \"R2\": 0.5587960603089545, \"Memory in Mb\": 0.5291376113891602, \"Time in s\": 1.527133 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.027942129925975, \"RMSE\": 
7.276897995624471, \"R2\": 0.6363381045701173, \"Memory in Mb\": 0.5012483596801758, \"Time in s\": 1.7549830000000002 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.049038224539579, \"RMSE\": 7.28413559977408, \"R2\": 0.7101491068659838, \"Memory in Mb\": 0.3717927932739258, \"Time in s\": 2.012416 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.250417000807892, \"RMSE\": 7.635375364934655, \"R2\": 0.7374564682191251, \"Memory in Mb\": 0.3473329544067383, \"Time in s\": 2.287127 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 5.8380227010176045, \"RMSE\": 8.722095838276593, \"R2\": 0.7000574251839753, \"Memory in Mb\": 0.3545808792114258, \"Time in s\": 2.575553 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.044258003678201, \"RMSE\": 8.960072203538655, \"R2\": 0.7292489015049135, \"Memory in Mb\": 0.391514778137207, \"Time in s\": 2.875072 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.276524828158362, \"RMSE\": 9.354066864400572, \"R2\": 0.7625593259413487, \"Memory in Mb\": 0.4183511734008789, \"Time in s\": 3.187566 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.3739207187481774, \"RMSE\": 9.482128758285446, \"R2\": 0.7880944389945109, \"Memory in Mb\": 0.4399347305297851, \"Time in s\": 3.512786 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 6.979732945669537, \"RMSE\": 10.70081988088543, \"R2\": 0.7534238884125567, \"Memory in Mb\": 0.4525842666625976, \"Time in s\": 3.851311 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.440978636214291, \"RMSE\": 11.778455343325094, \"R2\": 0.7371253175053303, \"Memory in Mb\": 0.4610300064086914, \"Time in s\": 4.203753 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.722695321089189, \"RMSE\": 12.251182899165162, \"R2\": 0.7536765505554787, \"Memory in Mb\": 0.4700403213500976, \"Time in s\": 4.570129 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 7.706764271589622, \"RMSE\": 12.242915074252831, \"R2\": 0.7866542868859879, \"Memory in Mb\": 0.4733209609985351, \"Time in s\": 4.950063 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 8.246398153738868, \"RMSE\": 13.148346181255995, \"R2\": 0.7808464902384371, \"Memory in Mb\": 0.4770059585571289, \"Time in s\": 5.342659 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 9.3474227240372, \"RMSE\": 15.837966045238383, \"R2\": 0.7114408913609181, \"Memory in Mb\": 0.4802255630493164, \"Time in s\": 5.749268000000001 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 9.81720525198044, \"RMSE\": 16.4984558080814, \"R2\": 0.732030690326682, \"Memory in Mb\": 0.4911470413208008, \"Time in s\": 6.171356000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.029686613445753, \"RMSE\": 16.618597428793866, \"R2\": 
0.7565388422982208, \"Memory in Mb\": 0.496312141418457, \"Time in s\": 6.607662000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 10.183424974922596, \"RMSE\": 16.72954607175143, \"R2\": 0.7757182197717174, \"Memory in Mb\": 0.5041093826293945, \"Time in s\": 7.058783000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 11.087226456936444, \"RMSE\": 18.183293641227465, \"R2\": 0.7427163510168195, \"Memory in Mb\": 0.5065813064575195, \"Time in s\": 7.524030000000001 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 11.611208342142504, \"RMSE\": 19.353368352464965, \"R2\": 0.7320664683002076, \"Memory in Mb\": 0.4931306838989258, \"Time in s\": 8.007171000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 12.01727910935522, \"RMSE\": 20.0211446068419, \"R2\": 0.7463056879958478, \"Memory in Mb\": 0.4681062698364258, \"Time in s\": 8.510702000000002 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 12.181393533243163, \"RMSE\": 20.04691533881536, \"R2\": 0.7628481050767555, \"Memory in Mb\": 0.4353647232055664, \"Time in s\": 9.035917 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 13.09061742270854, \"RMSE\": 21.647454224788547, \"R2\": 0.7429569103775002, \"Memory in Mb\": 0.4253015518188476, \"Time in s\": 9.583169000000002 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 13.857583790923206, \"RMSE\": 23.134723844753264, \"R2\": 0.7244169153448246, \"Memory in Mb\": 0.4488153457641601, \"Time in s\": 10.147004 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 14.415812868423222, \"RMSE\": 24.06086703427245, \"R2\": 0.7351096813998451, \"Memory in Mb\": 0.4820413589477539, \"Time in s\": 10.724926 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 14.633210713365676, \"RMSE\": 24.14605091357361, \"R2\": 0.7521125932778163, \"Memory in Mb\": 0.5092172622680664, \"Time in s\": 11.318068 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 15.133020029766048, \"RMSE\": 24.73830229788057, \"R2\": 0.7508643131645836, \"Memory in Mb\": 0.5367746353149414, \"Time in s\": 11.926425 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 16.308165732303497, \"RMSE\": 27.095114472384648, \"R2\": 0.7192825818530082, \"Memory in Mb\": 0.5905351638793945, \"Time in s\": 12.55391 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.097647362788752, \"RMSE\": 28.42746345262044, \"R2\": 0.7271490710401121, \"Memory in Mb\": 0.6096200942993164, \"Time in s\": 13.198999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.403233741388874, \"RMSE\": 28.60630060223112, \"R2\": 0.7388459944901757, \"Memory in Mb\": 0.6277017593383789, \"Time in s\": 13.862263 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 17.845914876737442, \"RMSE\": 29.12415998011611, 
\"R2\": 0.7453593218363636, \"Memory in Mb\": 0.6518182754516602, \"Time in s\": 14.54413 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 18.74953553887996, \"RMSE\": 30.62874049978988, \"R2\": 0.7265554777237875, \"Memory in Mb\": 0.6593713760375977, \"Time in s\": 15.245298000000002 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 19.47487310339954, \"RMSE\": 31.618292560451803, \"R2\": 0.7250121198400961, \"Memory in Mb\": 0.6093225479125977, \"Time in s\": 15.967903000000002 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 20.25384975920148, \"RMSE\": 32.7838221306768, \"R2\": 0.7341994138392889, \"Memory in Mb\": 0.6206216812133789, \"Time in s\": 16.707851 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 20.43401222967888, \"RMSE\": 32.81136182644564, \"R2\": 0.7447469767293837, \"Memory in Mb\": 0.6340532302856445, \"Time in s\": 17.46759 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 21.48271816680656, \"RMSE\": 34.486522216355176, \"R2\": 0.7267085961280999, \"Memory in Mb\": 0.6409807205200195, \"Time in s\": 18.246304 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 22.214948014440758, \"RMSE\": 35.44362766111904, \"R2\": 0.7239528137855742, \"Memory in Mb\": 0.6374177932739258, \"Time in s\": 19.046772 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"ChickWeights\", \"MAE\": 23.05953013824058, \"RMSE\": 36.58622235196899, \"R2\": 0.7279275728793118, \"Memory in Mb\": 0.6435747146606445, \"Time in s\": 19.865794 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.585839365171216, \"RMSE\": 13.881516159864546, \"R2\": -450.9893611410228, \"Memory in Mb\": 0.4141368865966797, \"Time in s\": 0.017776 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.363712565198605, \"RMSE\": 9.938671227284791, \"R2\": -38.712022235644, \"Memory in Mb\": 0.5953998565673828, \"Time in s\": 0.05778 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1246200328296143, \"RMSE\": 8.126568674182735, \"R2\": -34.79519075888522, \"Memory in Mb\": 0.7211894989013672, \"Time in s\": 0.123528 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5854221987839483, \"RMSE\": 7.070456887280416, \"R2\": -26.179962781345687, \"Memory in Mb\": 0.8254451751708984, \"Time in s\": 0.217362 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.612795341566278, \"RMSE\": 6.441452396131832, \"R2\": -13.15439617190778, \"Memory in Mb\": 0.9287128448486328, \"Time in s\": 0.340609 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.5650159676967723, \"RMSE\": 5.964319967024998, \"R2\": -9.294746752112973, \"Memory in Mb\": 0.9687213897705078, \"Time in s\": 0.493962 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4246087892523565, \"RMSE\": 5.555151474129083, \"R2\": -8.228790661275799, \"Memory 
in Mb\": 0.9892597198486328, \"Time in s\": 0.677921 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2372944440918734, \"RMSE\": 5.2113826313001965, \"R2\": -6.162524907472281, \"Memory in Mb\": 1.0282306671142578, \"Time in s\": 0.8947890000000001 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1466345389477697, \"RMSE\": 4.9537003451084844, \"R2\": -4.683842281929501, \"Memory in Mb\": 0.9880685806274414, \"Time in s\": 1.151076 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9892950861944356, \"RMSE\": 4.704683592711833, \"R2\": -4.14200943628533, \"Memory in Mb\": 0.5611734390258789, \"Time in s\": 1.477073 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8962186057938932, \"RMSE\": 4.499134208858052, \"R2\": -4.072640393025983, \"Memory in Mb\": 0.3967218399047851, \"Time in s\": 1.841739 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.803503053095204, \"RMSE\": 4.314740779926486, \"R2\": -3.700467691841136, \"Memory in Mb\": 0.4504899978637695, \"Time in s\": 2.225753 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7080100607704198, \"RMSE\": 4.14999374436193, \"R2\": -3.406965132392963, \"Memory in Mb\": 0.5133523941040039, \"Time in s\": 2.631091 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6655932342441322, \"RMSE\": 4.017842470146439, \"R2\": -3.324861014485008, \"Memory in Mb\": 0.6190156936645508, \"Time in s\": 3.0590589999999995 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6385421968346507, \"RMSE\": 3.904974440220909, \"R2\": -3.01765496812087, \"Memory in Mb\": 0.7099161148071289, \"Time in s\": 3.510991 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5504197008133211, \"RMSE\": 3.781681667850968, \"R2\": -2.954527764504537, \"Memory in Mb\": 0.777043342590332, \"Time in s\": 3.987728 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5047182142648992, \"RMSE\": 3.6774767295200856, \"R2\": -2.9518368180177696, \"Memory in Mb\": 0.8162164688110352, \"Time in s\": 4.49124 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5167434089128324, \"RMSE\": 3.599404249538014, \"R2\": -2.816912258630976, \"Memory in Mb\": 0.8943338394165039, \"Time in s\": 5.022537 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.47690532831993, \"RMSE\": 3.5095784857397434, \"R2\": -2.754312484791544, \"Memory in Mb\": 0.9456682205200196, \"Time in s\": 5.582067 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.4473785178172314, \"RMSE\": 3.4281100357630656, \"R2\": -2.683273334840131, \"Memory in Mb\": 1.0103578567504885, \"Time in s\": 6.172723 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.40955398667131, \"RMSE\": 3.3497394826483737, \"R2\": -2.633176800681623, \"Memory in Mb\": 
0.9896020889282228, \"Time in s\": 6.801780999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3766290707405415, \"RMSE\": 3.2780180874471507, \"R2\": -2.4163065189839363, \"Memory in Mb\": 1.0508508682250977, \"Time in s\": 7.462708999999999 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3473843223583597, \"RMSE\": 3.2105206765417766, \"R2\": -2.1076358108912725, \"Memory in Mb\": 1.1290063858032229, \"Time in s\": 8.158304 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.3146601146346069, \"RMSE\": 3.145698486393554, \"R2\": -1.914776430829721, \"Memory in Mb\": 1.2007226943969729, \"Time in s\": 8.887782 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2829933656636, \"RMSE\": 3.084835387092109, \"R2\": -1.7442697801193798, \"Memory in Mb\": 1.2468271255493164, \"Time in s\": 9.6533 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.253253159364902, \"RMSE\": 3.027195393916086, \"R2\": -1.647288417049146, \"Memory in Mb\": 1.2976083755493164, \"Time in s\": 10.456286 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.221748892460797, \"RMSE\": 2.9719574458877616, \"R2\": -1.5422080467663055, \"Memory in Mb\": 1.2989130020141602, \"Time in s\": 11.302161 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.2084725532442826, \"RMSE\": 2.924408360153415, \"R2\": -1.5201637785773734, \"Memory in Mb\": 1.2362565994262695, \"Time in s\": 12.194852 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1798260089354875, \"RMSE\": 2.875143807222344, \"R2\": -1.496217338554359, \"Memory in Mb\": 1.0948266983032229, \"Time in s\": 13.14623 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1697523000107772, \"RMSE\": 2.832108990371036, \"R2\": -1.3952574337441268, \"Memory in Mb\": 0.9711389541625975, \"Time in s\": 14.161435 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1550049446613664, \"RMSE\": 2.791430677298195, \"R2\": -1.29147514335963, \"Memory in Mb\": 0.9871377944946288, \"Time in s\": 15.218796 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1314798119108302, \"RMSE\": 2.748614104980716, \"R2\": -1.180190104825623, \"Memory in Mb\": 0.955540657043457, \"Time in s\": 16.316332 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.1121913646065007, \"RMSE\": 2.708986615305391, \"R2\": -1.102550782410015, \"Memory in Mb\": 0.9793291091918944, \"Time in s\": 17.449759 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.096472283083769, \"RMSE\": 2.6715401075955145, \"R2\": -1.0771388394715324, \"Memory in Mb\": 1.0325212478637695, \"Time in s\": 18.614745 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0884657542544798, \"RMSE\": 2.639348967980305, \"R2\": -1.080631470653255, \"Memory in Mb\": 
1.1148195266723633, \"Time in s\": 19.813024 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0667544669913536, \"RMSE\": 2.603003384812668, \"R2\": -1.0681837571806945, \"Memory in Mb\": 1.1698732376098633, \"Time in s\": 21.046603 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0459321867530782, \"RMSE\": 2.5682868281355016, \"R2\": -1.0121733037883445, \"Memory in Mb\": 1.2092561721801758, \"Time in s\": 22.315315 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0276781933457848, \"RMSE\": 2.5353029000371494, \"R2\": -0.9820644398878616, \"Memory in Mb\": 1.2420778274536133, \"Time in s\": 23.623046 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0094276125302497, \"RMSE\": 2.50338787783782, \"R2\": -0.9411341749539492, \"Memory in Mb\": 1.3017473220825195, \"Time in s\": 24.968597 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.0023537746213038, \"RMSE\": 2.475512975868657, \"R2\": -0.9153129864924484, \"Memory in Mb\": 1.3318758010864258, \"Time in s\": 26.357821 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9977235614586972, \"RMSE\": 2.450813157375169, \"R2\": -0.8978992844128302, \"Memory in Mb\": 1.3786516189575195, \"Time in s\": 27.784762 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9889333901170724, \"RMSE\": 2.4246168260512984, \"R2\": -0.8747893145143344, \"Memory in Mb\": 1.416356086730957, \"Time in s\": 29.251496 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9712423333697714, \"RMSE\": 2.3967734443047704, \"R2\": -0.8288218633529623, \"Memory in Mb\": 1.4480867385864258, \"Time in s\": 30.757563999999995 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9593965092028914, \"RMSE\": 2.3708401769846086, \"R2\": -0.7805683851951253, \"Memory in Mb\": 1.4688615798950195, \"Time in s\": 32.305297 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9500350341868696, \"RMSE\": 2.346029725301664, \"R2\": -0.7508441641708208, \"Memory in Mb\": 1.3691072463989258, \"Time in s\": 33.916341 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9362562616709212, \"RMSE\": 2.321054413049932, \"R2\": -0.7419227486050888, \"Memory in Mb\": 1.2137422561645508, \"Time in s\": 35.592854 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9293426247327622, \"RMSE\": 2.2986373047272863, \"R2\": -0.7229310397545319, \"Memory in Mb\": 1.2394838333129885, \"Time in s\": 37.31016399999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9223382441623138, \"RMSE\": 2.2770208564096435, \"R2\": -0.7083555493245846, \"Memory in Mb\": 1.2827730178833008, \"Time in s\": 39.06327099999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9155630176835056, \"RMSE\": 2.256277668055956, \"R2\": -0.7099489887647161, 
\"Memory in Mb\": 1.2986268997192385, \"Time in s\": 40.85878199999999 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Bagging\", \"dataset\": \"TrumpApproval\", \"MAE\": 0.9044154317407224, \"RMSE\": 2.234829366963466, \"R2\": -0.7058332444386559, \"Memory in Mb\": 1.3350114822387695, \"Time in s\": 42.69044199999999 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0524826049804687, \"Time in s\": 0.006254 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0595359802246093, \"Time in s\": 0.016967 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.0739974975585937, \"Time in s\": 0.032538 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0802268981933593, \"Time in s\": 0.0535149999999999 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0978660583496093, \"Time in s\": 0.0804889999999999 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.121429443359375, \"Time in s\": 0.1147379999999999 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 43.50649350649351, \"RMSE\": 43.70978671356627, \"R2\": -106.75487995129542, \"Memory in Mb\": 0.1384239196777343, \"Time in s\": 0.1574789999999999 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 44.21590909090909, \"RMSE\": 44.43649707984724, \"R2\": -99.97346126163, \"Memory in Mb\": 0.1540603637695312, \"Time in s\": 0.209356 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 45.05050505050505, \"RMSE\": 45.309262771858165, \"R2\": -86.8022342468144, \"Memory in Mb\": 0.1602935791015625, \"Time in s\": 0.271082 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16363636363636, \"RMSE\": 46.52487115902242, \"R2\": -63.64797006437341, \"Memory in Mb\": 0.164306640625, \"Time in s\": 0.3431619999999999 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 47.21487603305785, \"RMSE\": 47.67304278378361, \"R2\": -51.27707184490422, \"Memory in Mb\": 0.1650009155273437, \"Time in s\": 0.4251659999999999 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 48.29545454545455, \"RMSE\": 
48.843054157105485, \"R2\": -43.84882422437649, \"Memory in Mb\": 0.165863037109375, \"Time in s\": 0.5168919999999999 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 49.44055944055945, \"RMSE\": 50.100318941519305, \"R2\": -37.220279564063546, \"Memory in Mb\": 0.1322584152221679, \"Time in s\": 0.622087 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 50.532467532467535, \"RMSE\": 51.29137544271156, \"R2\": -33.04474826644667, \"Memory in Mb\": 0.1404676437377929, \"Time in s\": 0.736412 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 51.690909090909095, \"RMSE\": 52.61253451297311, \"R2\": -27.795548438273773, \"Memory in Mb\": 0.1464834213256836, \"Time in s\": 0.8597999999999999 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 53.00568181818182, \"RMSE\": 54.11860921749895, \"R2\": -23.566226925646237, \"Memory in Mb\": 0.1528844833374023, \"Time in s\": 0.99286 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 54.41176470588235, \"RMSE\": 55.733754017636336, \"R2\": -20.33250305682894, \"Memory in Mb\": 0.1579313278198242, \"Time in s\": 1.135584 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 56.02525252525252, \"RMSE\": 57.635786091488654, \"R2\": -17.146924852486976, \"Memory in Mb\": 0.1600141525268554, \"Time in s\": 1.287864 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 57.5645933014354, \"RMSE\": 59.46206220864915, \"R2\": -14.922837840066968, \"Memory in Mb\": 0.1255407333374023, \"Time in s\": 1.454924 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 58.69090909090908, \"RMSE\": 60.81327606250582, \"R2\": -13.581197962556498, \"Memory in Mb\": 0.1323118209838867, \"Time in s\": 1.63074 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 60.25541125541125, \"RMSE\": 62.66764529032318, \"R2\": -12.244451024360147, \"Memory in Mb\": 0.1370038986206054, \"Time in s\": 1.815352 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 62.17355371900826, \"RMSE\": 65.06963847478845, \"R2\": -10.489760184397111, \"Memory in Mb\": 0.1415891647338867, \"Time in s\": 2.009009 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 63.93675889328063, \"RMSE\": 67.17295239601157, \"R2\": -9.634560128382748, \"Memory in Mb\": 0.1452207565307617, \"Time in s\": 2.211595 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 65.10606060606062, \"RMSE\": 68.57980310513724, \"R2\": -9.127665748505592, \"Memory in Mb\": 0.1458845138549804, \"Time in s\": 2.423132 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 
66.61454545454548, \"RMSE\": 70.46451073219248, \"R2\": -8.408339126213217, \"Memory in Mb\": 0.1459379196166992, \"Time in s\": 2.643805 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 68.48951048951052, \"RMSE\": 72.8020594498525, \"R2\": -7.6983532427125105, \"Memory in Mb\": 0.1282644271850586, \"Time in s\": 2.877713 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 70.55218855218858, \"RMSE\": 75.3669362796119, \"R2\": -7.08492451355157, \"Memory in Mb\": 0.1300420761108398, \"Time in s\": 3.120317 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 72.39285714285718, \"RMSE\": 77.65033596401675, \"R2\": -6.643510181414674, \"Memory in Mb\": 0.1343069076538086, \"Time in s\": 3.37158 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 73.45454545454551, \"RMSE\": 79.15086186624424, \"R2\": -6.206879640065647, \"Memory in Mb\": 0.1407041549682617, \"Time in s\": 3.631653 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 75.77878787878792, \"RMSE\": 82.20832738177494, \"R2\": -5.653192449779911, \"Memory in Mb\": 0.1449460983276367, \"Time in s\": 3.900744 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 77.92375366568919, \"RMSE\": 84.89106353805269, \"R2\": -5.352795814687307, \"Memory in Mb\": 0.1466054916381836, \"Time in s\": 4.1790140000000005 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 80.04545454545458, \"RMSE\": 87.49376601169416, \"R2\": -5.134510311668016, \"Memory in Mb\": 0.1466588973999023, \"Time in s\": 4.46626 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 80.99724517906337, \"RMSE\": 88.57562798692558, \"R2\": -5.105139086016474, \"Memory in Mb\": 0.1461553573608398, \"Time in s\": 4.762509 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 82.77807486631018, \"RMSE\": 90.83029071422122, \"R2\": -4.901675845817959, \"Memory in Mb\": 0.1569280624389648, \"Time in s\": 5.06867 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 85.1766233766234, \"RMSE\": 93.99517810235533, \"R2\": -4.591702735915359, \"Memory in Mb\": 0.1629590988159179, \"Time in s\": 5.384646 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 87.26767676767678, \"RMSE\": 96.48964983485284, \"R2\": -4.494054297851511, \"Memory in Mb\": 0.1698560714721679, \"Time in s\": 5.710516 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 89.00737100737103, \"RMSE\": 98.71879502607636, \"R2\": -4.345544683073043, \"Memory in Mb\": 0.1741170883178711, \"Time in s\": 6.048375 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 
90.57416267942588, \"RMSE\": 100.72635724110243, \"R2\": -4.224084264201084, \"Memory in Mb\": 0.1756696701049804, \"Time in s\": 6.396311 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 93.12121212121215, \"RMSE\": 104.19735398794236, \"R2\": -3.967717840349581, \"Memory in Mb\": 0.1753263473510742, \"Time in s\": 6.754458 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 95.41818181818184, \"RMSE\": 107.03565676064125, \"R2\": -3.8710119659250095, \"Memory in Mb\": 0.1580381393432617, \"Time in s\": 7.124987 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 97.16629711751663, \"RMSE\": 109.07665280092142, \"R2\": -3.843505105397095, \"Memory in Mb\": 0.1671514511108398, \"Time in s\": 7.505263 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 98.71645021645024, \"RMSE\": 111.1763643167196, \"R2\": -3.72620239405422, \"Memory in Mb\": 0.1737470626831054, \"Time in s\": 7.89547 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 101.54122621564484, \"RMSE\": 115.2058457378686, \"R2\": -3.48124047566686, \"Memory in Mb\": 0.1785993576049804, \"Time in s\": 8.295795 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 103.77066115702482, \"RMSE\": 117.90601559037044, \"R2\": -3.4365483842712585, \"Memory in Mb\": 0.1823682785034179, \"Time in s\": 8.706368 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 106.02424242424244, \"RMSE\": 120.71525892518191, \"R2\": -3.37467008920777, \"Memory in Mb\": 0.1661062240600586, \"Time in s\": 9.13027 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 107.31620553359684, \"RMSE\": 122.26004165941237, \"R2\": -3.356924458603192, \"Memory in Mb\": 0.1709508895874023, \"Time in s\": 9.564033 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 109.39651837524178, \"RMSE\": 124.91233289427784, \"R2\": -3.291877964737682, \"Memory in Mb\": 0.1725950241088867, \"Time in s\": 10.007821 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 112.36553030303028, \"RMSE\": 129.1106745698386, \"R2\": -3.1225038051323804, \"Memory in Mb\": 0.1781835556030273, \"Time in s\": 10.461944 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 114.52504638218922, \"RMSE\": 131.65752925403248, \"R2\": -3.109734667916423, \"Memory in Mb\": 0.1829481124877929, \"Time in s\": 10.926196 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 115.89999999999996, \"RMSE\": 133.35909826820617, \"R2\": -3.0866973064470367, \"Memory in Mb\": 0.1825780868530273, \"Time in s\": 11.400602999999998 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": 
\"ChickWeights\", \"MAE\": 117.86452762923346, \"RMSE\": 135.8046463151548, \"R2\": -3.0526234314410727, \"Memory in Mb\": 0.1822462081909179, \"Time in s\": 11.885435 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"ChickWeights\", \"MAE\": 120.54020979020974, \"RMSE\": 139.4624607986965, \"R2\": -2.953338846956928, \"Memory in Mb\": 0.1833868026733398, \"Time in s\": 12.380554 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 43.8732195, \"RMSE\": 43.87807788634269, \"R2\": -4514.954899312423, \"Memory in Mb\": 0.1279449462890625, \"Time in s\": 0.015909 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.4932955, \"RMSE\": 42.52255283421693, \"R2\": -725.9491167623446, \"Memory in Mb\": 0.1855659484863281, \"Time in s\": 0.053644 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 42.2167785, \"RMSE\": 42.2386240157387, \"R2\": -966.0073736019044, \"Memory in Mb\": 0.2224998474121093, \"Time in s\": 0.118601 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.975705625, \"RMSE\": 41.99760868559829, \"R2\": -957.9655948743646, \"Memory in Mb\": 0.2547683715820312, \"Time in s\": 0.215522 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 41.37550450000001, \"RMSE\": 41.410913785433536, \"R2\": -583.9966399141301, \"Memory in Mb\": 0.2853622436523437, \"Time in s\": 0.349036 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.936110000000006, \"RMSE\": 40.97829382197767, \"R2\": -484.9611418859003, \"Memory in Mb\": 0.2942886352539062, \"Time in s\": 0.520816 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.6885472857143, \"RMSE\": 40.72961738075088, \"R2\": -495.1050461477588, \"Memory in Mb\": 0.2989387512207031, \"Time in s\": 0.713599 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.35105437500001, \"RMSE\": 40.39801158334292, \"R2\": -429.4078677932073, \"Memory in Mb\": 0.2328748703002929, \"Time in s\": 0.934381 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00981655555555, \"RMSE\": 40.06373388340122, \"R2\": -370.7794659133543, \"Memory in Mb\": 0.2508001327514648, \"Time in s\": 1.175315 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.80633095, \"RMSE\": 39.860362966711, \"R2\": -368.1089073295326, \"Memory in Mb\": 0.1596212387084961, \"Time in s\": 1.445583 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.727043136363626, \"RMSE\": 39.77723500009918, \"R2\": -395.5019807293188, \"Memory in Mb\": 0.1831541061401367, \"Time in s\": 1.734898 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 
39.56323079166665, \"RMSE\": 39.61325406766278, \"R2\": -395.19837684116754, \"Memory in Mb\": 0.1905164718627929, \"Time in s\": 2.043551 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.42014538461535, \"RMSE\": 39.46968290441584, \"R2\": -397.63185900832246, \"Memory in Mb\": 0.2020750045776367, \"Time in s\": 2.371621 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.33200189285712, \"RMSE\": 39.37942345737111, \"R2\": -414.4560159350036, \"Memory in Mb\": 0.2260313034057617, \"Time in s\": 2.720012 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.18435719999999, \"RMSE\": 39.23275803924839, \"R2\": -404.5402138221895, \"Memory in Mb\": 0.2437810897827148, \"Time in s\": 3.088623 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.13568690624999, \"RMSE\": 39.1818628962716, \"R2\": -423.5167725219512, \"Memory in Mb\": 0.2581815719604492, \"Time in s\": 3.477665 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.14620944117645, \"RMSE\": 39.18989510023786, \"R2\": -447.7943063391533, \"Memory in Mb\": 0.2493734359741211, \"Time in s\": 3.892527 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.24072974999997, \"RMSE\": 39.28395553300239, \"R2\": -453.6543473793619, \"Memory in Mb\": 0.2643537521362304, \"Time in s\": 4.328145999999999 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.29597665789471, \"RMSE\": 39.33769921546023, \"R2\": -470.6701690846498, \"Memory in Mb\": 0.2747945785522461, \"Time in s\": 4.784845 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.35730624999997, \"RMSE\": 39.39781946688104, \"R2\": -485.4842825426507, \"Memory in Mb\": 0.2898855209350586, \"Time in s\": 5.262664 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.40549083333331, \"RMSE\": 39.44465897881697, \"R2\": -502.7799504226928, \"Memory in Mb\": 0.2975950241088867, \"Time in s\": 5.761624 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.49730674999998, \"RMSE\": 39.53710368662846, \"R2\": -495.9856416828035, \"Memory in Mb\": 0.3090314865112304, \"Time in s\": 6.281948 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.61474728260867, \"RMSE\": 39.65658853240579, \"R2\": -473.14358309219216, \"Memory in Mb\": 0.3255758285522461, \"Time in s\": 6.824272 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.71032456249997, \"RMSE\": 39.75304758270976, \"R2\": -464.4916761787406, \"Memory in Mb\": 0.3411321640014648, \"Time in s\": 7.389190999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": 
\"TrumpApproval\", \"MAE\": 39.80313951999997, \"RMSE\": 39.84667590965187, \"R2\": -456.8750824508669, \"Memory in Mb\": 0.2797002792358398, \"Time in s\": 7.981206999999999 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.87354713461536, \"RMSE\": 39.916931033645376, \"R2\": -459.2932847271911, \"Memory in Mb\": 0.2906713485717773, \"Time in s\": 8.594204 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.94649651851849, \"RMSE\": 39.98996046818772, \"R2\": -459.28610565666287, \"Memory in Mb\": 0.2965841293334961, \"Time in s\": 9.232432 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 39.97606614285712, \"RMSE\": 40.018487723609816, \"R2\": -470.926187706672, \"Memory in Mb\": 0.3019857406616211, \"Time in s\": 9.891908 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.00338510344825, \"RMSE\": 40.044755101652726, \"R2\": -483.2331705341176, \"Memory in Mb\": 0.3056249618530273, \"Time in s\": 10.572818 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.07393431666663, \"RMSE\": 40.11569326301364, \"R2\": -479.5746686678817, \"Memory in Mb\": 0.3106412887573242, \"Time in s\": 11.275213 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.1459417741935, \"RMSE\": 40.18827077358568, \"R2\": -473.96334667177865, \"Memory in Mb\": 0.3147268295288086, \"Time in s\": 11.999135999999998 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.21943815624997, \"RMSE\": 40.26249426545423, \"R2\": -466.8085709746123, \"Memory in Mb\": 0.3194303512573242, \"Time in s\": 12.744586999999996 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.28296777272724, \"RMSE\": 40.32626722721455, \"R2\": -464.9172853497744, \"Memory in Mb\": 0.3247060775756836, \"Time in s\": 13.511768999999996 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31998279411761, \"RMSE\": 40.36256991107017, \"R2\": -473.1325264408024, \"Memory in Mb\": 0.3289289474487304, \"Time in s\": 14.300919999999998 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31359012857138, \"RMSE\": 40.35509446667054, \"R2\": -485.40526703956544, \"Memory in Mb\": 0.2762861251831054, \"Time in s\": 15.116749 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.31730695833329, \"RMSE\": 40.357915759594896, \"R2\": -496.1610725544049, \"Memory in Mb\": 0.2866239547729492, \"Time in s\": 15.953537 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.36653568918915, \"RMSE\": 40.40711941642496, \"R2\": -497.0742803710164, \"Memory in Mb\": 0.2274045944213867, \"Time in s\": 16.820776 }, { \"step\": 760, \"track\": \"Regression\", 
\"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.40314367105261, \"RMSE\": 40.443256311482514, \"R2\": -503.3712175162706, \"Memory in Mb\": 0.2380895614624023, \"Time in s\": 17.708028 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.44545064102563, \"RMSE\": 40.48534274444009, \"R2\": -506.6856716110208, \"Memory in Mb\": 0.2534551620483398, \"Time in s\": 18.615628 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.47854825, \"RMSE\": 40.518050685964006, \"R2\": -512.1052117095793, \"Memory in Mb\": 0.2672185897827148, \"Time in s\": 19.543925 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.50894034146341, \"RMSE\": 40.5479845946661, \"R2\": -518.5068774177179, \"Memory in Mb\": 0.2752447128295898, \"Time in s\": 20.493024 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.5406558690476, \"RMSE\": 40.57931089736599, \"R2\": -524.140575335229, \"Memory in Mb\": 0.2841558456420898, \"Time in s\": 21.463054 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.58371181395347, \"RMSE\": 40.62239247493601, \"R2\": -524.3496319016275, \"Memory in Mb\": 0.2900037765502929, \"Time in s\": 22.454245 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.62855514772725, \"RMSE\": 40.66738601007716, \"R2\": -522.897851512946, \"Memory in Mb\": 0.2707319259643554, \"Time in s\": 23.471721 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.664104233333326, \"RMSE\": 40.702738445808535, \"R2\": -526.020768835918, \"Memory in Mb\": 0.2802000045776367, \"Time in s\": 24.510012 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.68274704347825, \"RMSE\": 40.72073961991632, \"R2\": -535.1540147256861, \"Memory in Mb\": 0.2901716232299804, \"Time in s\": 25.569536 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.70972619148935, \"RMSE\": 40.74737437775791, \"R2\": -540.4099749760601, \"Memory in Mb\": 0.2975721359252929, \"Time in s\": 26.655616 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.73400636458332, \"RMSE\": 40.771242977826994, \"R2\": -546.7118652484228, \"Memory in Mb\": 0.3080549240112304, \"Time in s\": 27.763054 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.74031829795916, \"RMSE\": 40.77684015923968, \"R2\": -557.5026042066913, \"Memory in Mb\": 0.3134031295776367, \"Time in s\": 28.892281 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"Exponentially Weighted Average\", \"dataset\": \"TrumpApproval\", \"MAE\": 40.75359492299998, \"RMSE\": 40.78950075300399, \"R2\": -567.2567645513548, \"Memory in Mb\": 0.3166418075561523, \"Time in s\": 30.043213999999995 }, { \"step\": 11, \"track\": 
\"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.63636363636363, \"RMSE\": 41.64569169030137, \"R2\": -2231.5319148936137, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.028772 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.31818181818181, \"RMSE\": 41.32960638133835, \"R2\": -1808.0547045951903, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.079044 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.12121212121212, \"RMSE\": 41.13871582091424, \"R2\": -1174.393494897962, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.149857 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.159090909090914, \"RMSE\": 41.17451771534076, \"R2\": -1333.7620984139928, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.237747 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 41.5090909090909, \"RMSE\": 41.57075020645253, \"R2\": -336.3506066081568, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.338425 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 42.681818181818166, \"RMSE\": 42.82080349691271, \"R2\": -153.29834830483878, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.452041 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.46441536424646, \"RMSE\": 43.6630525281676, \"R2\": -106.5245816691109, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.578307 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.36686847531974, \"RMSE\": 43.58986852756654, \"R2\": -96.16251088961316, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.71715 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 39.3777319762956, \"RMSE\": 41.29293125499619, \"R2\": -71.92610223487543, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 0.8686729999999999 }, { \"step\": 110, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 36.38865389064824, \"RMSE\": 39.46494228501369, \"R2\": -45.51654802750055, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.032976 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 33.47571311046551, \"RMSE\": 37.6638192516392, \"R2\": -31.6297858502766, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.209623 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 30.829458151969483, \"RMSE\": 36.06523958622657, \"R2\": -23.452489437409387, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.398231 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 28.6940047418964, \"RMSE\": 34.66464981164902, \"R2\": -17.297279097398025, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.59888 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 26.901670019158168, \"RMSE\": 33.42439644535526, \"R2\": -13.457346308406352, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 1.811498 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"River 
MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 25.73601455188179, \"RMSE\": 32.42918896219868, \"R2\": -9.940044331027543, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.036514 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.44416894616589, \"RMSE\": 31.465121114844862, \"R2\": -7.304318935113031, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.273516 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 23.31106527863985, \"RMSE\": 30.560727675640003, \"R2\": -5.414053903472148, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.522216 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 22.2497659881735, \"RMSE\": 29.72926965926448, \"R2\": -3.828220321961205, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 2.782587 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.58385771370322, \"RMSE\": 29.10991604615937, \"R2\": -2.8161237021822814, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.05463 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.489125733317938, \"RMSE\": 28.89967848441583, \"R2\": -2.2929294700983545, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.338483 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.96077526896472, \"RMSE\": 28.36167354440183, \"R2\": -1.7127571457874735, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.633999 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.684837308385998, \"RMSE\": 27.97322591752617, \"R2\": -1.123436519018493, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 3.941181 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.10836609318901, \"RMSE\": 27.433189641281587, \"R2\": -0.7737126718723288, \"Memory in Mb\": 0.0121526718139648, \"Time in s\": 4.259926 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.477468707260805, \"RMSE\": 27.77140109557264, \"R2\": -0.6607814047377214, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 4.590346 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.61134029899391, \"RMSE\": 28.056502395486245, \"R2\": -0.491554549366604, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 4.932228 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.562403371381592, \"RMSE\": 27.861367163045813, \"R2\": -0.2739563786996055, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 5.285550000000001 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.2802158532438, \"RMSE\": 27.63742758295081, \"R2\": -0.0872000961905188, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 5.650314000000001 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 20.449355350398, \"RMSE\": 27.920149137510645, \"R2\": 0.0118073081415535, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.026518000000001 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"River MLP\", 
\"dataset\": \"ChickWeights\", \"MAE\": 21.50134169290969, \"RMSE\": 30.009678165732637, \"R2\": -0.0359973595319296, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.414170000000001 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.657358056011915, \"RMSE\": 30.050226639054536, \"R2\": 0.1110159573580076, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 6.813245000000001 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.886687938329302, \"RMSE\": 30.333453058468624, \"R2\": 0.1888808568866784, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 7.223789000000001 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 21.886047000767118, \"RMSE\": 30.34044931799507, \"R2\": 0.2623171341711791, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 7.645758000000001 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 22.895757506339034, \"RMSE\": 32.0447958992957, \"R2\": 0.2009350122171253, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.07951 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 23.75249563906416, \"RMSE\": 33.838912051163845, \"R2\": 0.1808815149406113, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.524693000000001 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.560900349589296, \"RMSE\": 34.753482086388026, \"R2\": 0.2355843412503995, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 8.981311000000002 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 24.352453583923488, \"RMSE\": 34.473609026370774, \"R2\": 0.2986981406442135, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 9.449373 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 25.719275464870663, \"RMSE\": 37.04357441923364, \"R2\": 0.2473067090757558, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 9.928858000000002 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 27.096550121003176, \"RMSE\": 39.902565812758986, \"R2\": 0.1801666848023049, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 10.419752000000004 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 28.343681876579897, \"RMSE\": 42.0624752869508, \"R2\": 0.1904695440990398, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 10.922067000000002 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 29.341897763641, \"RMSE\": 43.65194027092424, \"R2\": 0.1898427093039769, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 11.435807000000002 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 30.67273802938304, \"RMSE\": 45.6370688200433, \"R2\": 0.1521253661768283, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 11.960959000000004 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 32.56224145473062, \"RMSE\": 49.915337115223885, \"R2\": 0.0473016837570096, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 12.497734000000005 }, 
{ \"step\": 473, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 34.43415680949522, \"RMSE\": 53.78695495416363, \"R2\": 0.0232056621878617, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 13.045956000000004 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 37.59302308486845, \"RMSE\": 59.43178585545712, \"R2\": -0.1272256336981969, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 13.605608000000004 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 39.81029832190851, \"RMSE\": 63.2563754686467, \"R2\": -0.2012394142017446, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 14.176690000000004 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 40.81849028215024, \"RMSE\": 64.7117163856453, \"R2\": -0.2206096222811289, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 14.759208000000005 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 42.30053541895924, \"RMSE\": 66.69347450894223, \"R2\": -0.2234984190364006, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 15.353157000000005 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 43.87998520264807, \"RMSE\": 69.22658128427689, \"R2\": -0.1851751428384946, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 15.958328000000003 }, { \"step\": 539, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 45.090482136166365, \"RMSE\": 70.91807128614899, \"R2\": -0.1924391171188431, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 16.574693000000003 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 46.16603381063717, \"RMSE\": 72.79977252028388, \"R2\": -0.2178315347358042, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 17.202244000000004 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 47.99895001321041, \"RMSE\": 75.49864048778444, \"R2\": -0.2525216922711382, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 17.841166000000005 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"ChickWeights\", \"MAE\": 49.57826542612398, \"RMSE\": 77.90262570405233, \"R2\": -0.2335409846027114, \"Memory in Mb\": 0.012312889099121, \"Time in s\": 18.491295000000004 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 30.45103241731707, \"RMSE\": 33.23585723529438, \"R2\": -2590.0045530336465, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.03648 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 18.3406114455606, \"RMSE\": 24.1628558112126, \"R2\": -233.72636807636488, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.100456 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 14.012302795940927, \"RMSE\": 20.27429916426714, \"R2\": -221.7932161673039, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.190991 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 11.49107264681655, \"RMSE\": 17.720640796103456, \"R2\": -169.73114207921216, 
\"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.307239 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 9.726608796116103, \"RMSE\": 15.913172800750536, \"R2\": -85.38479390162912, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.4491740000000001 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 8.642897473622819, \"RMSE\": 14.610205599232696, \"R2\": -60.77410396737057, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.6162740000000001 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 7.627861449957184, \"RMSE\": 13.547130531753504, \"R2\": -53.88423494401591, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 0.8076440000000001 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.825616478903957, \"RMSE\": 12.68640242718137, \"R2\": -41.44604107616888, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.023068 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 6.298325880816026, \"RMSE\": 11.99455373608459, \"R2\": -32.32351025206219, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.262663 }, { \"step\": 200, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.791221405645649, \"RMSE\": 11.389261045241726, \"R2\": -29.134439780883504, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.526002 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.429217208457164, \"RMSE\": 10.877504011291329, \"R2\": -28.650681734644422, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 1.812923 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 5.060396678997669, \"RMSE\": 10.421476408956162, \"R2\": -26.4214333861576, \"Memory in Mb\": 0.0131101608276367, \"Time in s\": 2.123256 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.7577579662422185, \"RMSE\": 10.01970369652003, \"R2\": -24.68943115642662, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 2.456738 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.47898479527949, \"RMSE\": 9.659781964354623, \"R2\": -23.99890538466576, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 2.813161 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.231628573868652, \"RMSE\": 9.33546881214548, \"R2\": -21.961936464609707, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 3.192823 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 4.032288217093912, \"RMSE\": 9.046394584789834, \"R2\": -21.629541054113563, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 3.5954460000000004 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.8351551756309497, \"RMSE\": 8.779241117397504, \"R2\": -21.522318216666985, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.021045 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.660607443401045, \"RMSE\": 8.536652519215579, \"R2\": 
-20.469707840891584, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.469678 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.505270605120577, \"RMSE\": 8.311598150636895, \"R2\": -20.056664379019765, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 4.941134 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.360453882251005, \"RMSE\": 8.10307441673556, \"R2\": -19.578991923208985, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 5.435716 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.230603986740656, \"RMSE\": 7.909485894846834, \"R2\": -19.256340082684435, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 5.953156 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.1198347345540465, \"RMSE\": 7.731010347257149, \"R2\": -18.002320884204543, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 6.493473 }, { \"step\": 460, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 3.010353198494384, \"RMSE\": 7.562877110682495, \"R2\": -16.244605442867506, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 7.056611999999999 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.9023580604284582, \"RMSE\": 7.404341650720478, \"R2\": -15.148937801753288, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 7.642589999999999 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.8120368518109427, \"RMSE\": 7.256642134427339, \"R2\": -14.185679409391112, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 8.251631 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.722217494708863, \"RMSE\": 7.11705765703134, \"R2\": -13.632593906904136, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 8.883484 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.6379600337769933, \"RMSE\": 6.984812005374008, \"R2\": -13.042206620144157, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 9.537691 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.573532085936036, \"RMSE\": 6.862931118690264, \"R2\": -12.879442173523902, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 10.214296 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.508294944253365, \"RMSE\": 6.745457437445754, \"R2\": -12.739978855164816, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 10.913296 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.4431260057234407, \"RMSE\": 6.633435680824833, \"R2\": -12.140422111767563, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 11.634954 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.386700775516181, \"RMSE\": 6.528113194197335, \"R2\": -11.53247390884172, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 12.379039999999998 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3242014075410804, \"RMSE\": 6.426013651213371, 
\"R2\": -10.916538218303476, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 13.145560999999995 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2685968162113417, \"RMSE\": 6.328883008296775, \"R2\": -10.475904112331508, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 13.934486999999995 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.2077966601853354, \"RMSE\": 6.235306598260896, \"R2\": -10.31508330333358, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 14.745848999999998 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1581493026656715, \"RMSE\": 6.146460408814674, \"R2\": -10.283704638807178, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 15.579853999999996 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.1027567473920863, \"RMSE\": 6.060643545524585, \"R2\": -10.211846444382044, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 16.436290999999997 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051121816682194, \"RMSE\": 5.978350933609188, \"R2\": -9.90287777753909, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 17.315177999999996 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.003155134641025, \"RMSE\": 5.899358540561457, \"R2\": -9.731678337792127, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 18.216514 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9581628436412764, \"RMSE\": 5.823548668044576, \"R2\": -9.504483044737295, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 19.140319 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.920522972774301, \"RMSE\": 5.751090425279772, \"R2\": -9.337362153707344, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 20.086783 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.884378546616626, \"RMSE\": 5.681435320930898, \"R2\": -9.199265318679966, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 21.055682 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8462172344878476, \"RMSE\": 5.613649762186833, \"R2\": -9.049787223051387, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 22.047027 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8114873001009923, \"RMSE\": 5.548504609880171, \"R2\": -8.800976301633217, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 23.060786 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7737427907909609, \"RMSE\": 5.485176730961097, \"R2\": -8.530931568707373, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 24.096958 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7378742216236305, \"RMSE\": 5.423968591721732, \"R2\": -8.358684442608821, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 25.155795 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 
1.7058202733924563, \"RMSE\": 5.364926267836746, \"R2\": -8.30648673172487, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 26.237104 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6771033983092818, \"RMSE\": 5.307938337650396, \"R2\": -8.187106100071555, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 27.340904 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6470899020587193, \"RMSE\": 5.252615019014566, \"R2\": -8.09065943244505, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 28.467203 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6179664800066769, \"RMSE\": 5.198922142498095, \"R2\": -8.078721464420347, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 29.615952 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"River MLP\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.591389315944533, \"RMSE\": 5.1470018023734205, \"R2\": -8.048080908594342, \"Memory in Mb\": 0.013350486755371, \"Time in s\": 30.787287 }, { \"step\": 11, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.664574314574316, \"RMSE\": 12.7079745317607, \"R2\": -206.87879383707747, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.002154 }, { \"step\": 22, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.767694704637076, \"RMSE\": 9.018587183866767, \"R2\": -85.14025986830408, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.005944 }, { \"step\": 33, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.3093367298127023, \"RMSE\": 7.420500566500976, \"R2\": -37.24267181629702, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.010037 }, { \"step\": 44, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 1.892363968348808, \"RMSE\": 6.441521936619904, \"R2\": -31.668094594906044, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.014469 }, { \"step\": 55, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.1129412159858934, \"RMSE\": 6.114058653243701, \"R2\": -6.297346571779499, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.019181 }, { \"step\": 66, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 2.832849782567835, \"RMSE\": 6.236602142425367, \"R2\": -2.2730130120415795, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.024182 }, { \"step\": 77, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 3.4069290990236856, \"RMSE\": 6.402381882180361, \"R2\": -1.3118663438824, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.029453 }, { \"step\": 88, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 3.650377971160808, \"RMSE\": 6.321189272940957, \"R2\": -1.043267371916866, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.034986 }, { \"step\": 99, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.035631404360372, \"RMSE\": 6.4483291916176695, \"R2\": -0.7783857772357967, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.040781 }, { 
\"step\": 110, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 4.693189868957898, \"RMSE\": 7.0697740144659305, \"R2\": -0.4927792786841307, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.046845 }, { \"step\": 121, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 5.274396860168236, \"RMSE\": 7.6542276724395, \"R2\": -0.3476225254437259, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.053171 }, { \"step\": 132, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 5.875758254207378, \"RMSE\": 8.194624755054596, \"R2\": -0.2624191661321591, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.05976 }, { \"step\": 143, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 6.530760796045927, \"RMSE\": 8.870097879563003, \"R2\": -0.1980355424044948, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.066612 }, { \"step\": 154, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 7.121466111912466, \"RMSE\": 9.458403141043558, \"R2\": -0.1577027852151795, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.073726 }, { \"step\": 165, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 7.772438504082036, \"RMSE\": 10.375670403553157, \"R2\": -0.1198999930450892, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.081109 }, { \"step\": 176, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 8.565827130563894, \"RMSE\": 11.410434180005833, \"R2\": -0.0920676568626532, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.088752 }, { \"step\": 187, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 9.429958588641576, \"RMSE\": 12.495061319237752, \"R2\": -0.0722153171628203, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.096665 }, { \"step\": 198, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 10.47731537859646, \"RMSE\": 13.900491647656429, \"R2\": -0.0555502703757588, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.104851 }, { \"step\": 209, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 11.43172675762076, \"RMSE\": 15.229123619635446, \"R2\": -0.0444565128716372, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.1133 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 11.97432098008114, \"RMSE\": 16.22368260926648, \"R2\": -0.0377560869847111, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.122025 }, { \"step\": 231, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 12.9382196746461, \"RMSE\": 17.489503190785292, \"R2\": -0.0315781972827118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.131017 }, { \"step\": 242, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 14.229204186206864, \"RMSE\": 19.43725798629848, \"R2\": -0.0252367718674193, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.140272 }, { \"step\": 253, \"track\": \"Regression\", \"model\": \"[baseline] 
Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 15.339413196393396, \"RMSE\": 20.82023831254592, \"R2\": -0.0216497893038387, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.149788 }, { \"step\": 264, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 15.948617107030818, \"RMSE\": 21.75817315507082, \"R2\": -0.0194401851240946, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.159572 }, { \"step\": 275, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 16.794155127707494, \"RMSE\": 23.16724301729152, \"R2\": -0.0169996193237813, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.169617 }, { \"step\": 286, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 17.990009992534457, \"RMSE\": 24.865985915258104, \"R2\": -0.0147547133955299, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.17992 }, { \"step\": 297, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 19.34919450213405, \"RMSE\": 26.67620929760368, \"R2\": -0.0128904565600072, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.190491 }, { \"step\": 308, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 20.46881241431745, \"RMSE\": 28.248013022827838, \"R2\": -0.011537481517321, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.201321 }, { \"step\": 319, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 20.993702124162965, \"RMSE\": 29.63814114349949, \"R2\": -0.0105036731193923, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.212414 }, { \"step\": 330, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 22.586872779548436, \"RMSE\": 32.01796640002603, \"R2\": -0.0092202379520505, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.223765 }, { \"step\": 341, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 23.97345887210737, \"RMSE\": 33.821533603903084, \"R2\": -0.0083877019037323, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.235368 }, { \"step\": 352, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 25.315991788770976, \"RMSE\": 35.461698606860665, \"R2\": -0.0077313021586467, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.247225 }, { \"step\": 363, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 25.615062978866305, \"RMSE\": 35.981300981590465, \"R2\": -0.0074437490312051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.259337 }, { \"step\": 374, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 26.673321526932543, \"RMSE\": 37.51836715700961, \"R2\": -0.0069358461242559, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.271713 }, { \"step\": 385, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 28.27694482780972, \"RMSE\": 39.8753298933956, \"R2\": -0.0063325109838794, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.2843549999999999 }, { \"step\": 396, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", 
\"MAE\": 29.55612496209691, \"RMSE\": 41.28848705945016, \"R2\": -0.0059801818919071, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.2972059999999999 }, { \"step\": 407, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 30.56167711268285, \"RMSE\": 42.81802042618151, \"R2\": -0.0056467231500465, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3102509999999999 }, { \"step\": 418, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 31.39346669137945, \"RMSE\": 44.18765357092498, \"R2\": -0.0053697143301307, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3234889999999999 }, { \"step\": 429, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 33.10612890637694, \"RMSE\": 46.865579751152914, \"R2\": -0.0049663660706051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3369199999999999 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 34.54914638861108, \"RMSE\": 48.61167278858254, \"R2\": -0.0047161238549726, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3505489999999999 }, { \"step\": 451, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 35.43263419295921, \"RMSE\": 49.67507127970072, \"R2\": -0.0045536938071879, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3643729999999999 }, { \"step\": 462, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 36.308550382896186, \"RMSE\": 51.2507761435036, \"R2\": -0.0043573774895468, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3783909999999999 }, { \"step\": 473, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 38.26330298063241, \"RMSE\": 54.53225049728104, \"R2\": -0.0040516612048955, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.3926029999999999 }, { \"step\": 484, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 39.59866234800828, \"RMSE\": 56.08659790201894, \"R2\": -0.0039023944795495, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4070059999999999 }, { \"step\": 495, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 40.94697327298068, \"RMSE\": 57.823326559810994, \"R2\": -0.0037535911132069, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4215999999999999 }, { \"step\": 506, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 41.42384714758024, \"RMSE\": 58.67984594201592, \"R2\": -0.0036652347211194, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4363849999999999 }, { \"step\": 517, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 42.72663002099646, \"RMSE\": 60.40151056768402, \"R2\": -0.0035345422299792, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4513639999999999 }, { \"step\": 528, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 44.77321528369677, \"RMSE\": 63.69509749878913, \"R2\": -0.0033415055563215, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4665329999999999 }, { \"step\": 539, \"track\": \"Regression\", \"model\": 
\"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 45.99579764939489, \"RMSE\": 65.0494992510053, \"R2\": -0.003252609562637, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4818929999999999 }, { \"step\": 550, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 46.57020777663759, \"RMSE\": 66.07332710234044, \"R2\": -0.0031815200825582, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.4974459999999999 }, { \"step\": 561, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 47.75825760640621, \"RMSE\": 67.5643396193493, \"R2\": -0.0030950009187136, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.5131919999999999 }, { \"step\": 572, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"ChickWeights\", \"MAE\": 49.49138874897682, \"RMSE\": 70.24569214117749, \"R2\": -0.0029719424061886, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.5291269999999999 }, { \"step\": 20, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.695184981652336, \"RMSE\": 9.807184976514188, \"R2\": -224.6021011118197, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.003379 }, { \"step\": 40, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.3994713447037435, \"RMSE\": 7.102066178895935, \"R2\": -19.27845129783118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0081789999999999 }, { \"step\": 60, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8170744682035584, \"RMSE\": 5.815253847056423, \"R2\": -17.329373299766118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0135909999999999 }, { \"step\": 80, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.604995404573344, \"RMSE\": 5.081770494168446, \"R2\": -13.040545957103586, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0195849999999999 }, { \"step\": 100, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.824259078948539, \"RMSE\": 4.70488333223354, \"R2\": -6.5512954222403845, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0262549999999999 }, { \"step\": 120, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.918744608116588, \"RMSE\": 4.412336880489357, \"R2\": -4.634185300646759, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.033502 }, { \"step\": 140, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8761207739327503, \"RMSE\": 4.13187920011476, \"R2\": -4.105616799680584, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.041316 }, { \"step\": 160, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.961232939518506, \"RMSE\": 3.976173487274506, \"R2\": -3.1695661963674864, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0496949999999999 }, { \"step\": 180, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.066134597500757, \"RMSE\": 3.873731518767916, \"R2\": -2.4756944369169624, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0586379999999999 }, { \"step\": 200, 
\"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 2.051125997923389, \"RMSE\": 3.731810291394655, \"R2\": -2.23527456693896, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.0682329999999999 }, { \"step\": 220, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.94095193468414, \"RMSE\": 3.56902990398404, \"R2\": -2.19210047340805, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.078397 }, { \"step\": 240, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9366756524315063, \"RMSE\": 3.4612902974772624, \"R2\": -2.024876884626847, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.089124 }, { \"step\": 260, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.9250039777458068, \"RMSE\": 3.363327951159923, \"R2\": -1.8945640461454525, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.1004139999999999 }, { \"step\": 280, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8726934920539136, \"RMSE\": 3.257010428159885, \"R2\": -1.8420037280027224, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.11227 }, { \"step\": 300, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.8907476896224935, \"RMSE\": 3.1958821895815714, \"R2\": -1.6910252267675163, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.124747 }, { \"step\": 320, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.819623890420079, \"RMSE\": 3.103812605138666, \"R2\": -1.663886258690169, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.137792 }, { \"step\": 340, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7396293145937214, \"RMSE\": 3.014220627768389, \"R2\": -1.654906383755708, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.151284 }, { \"step\": 360, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7350691203787965, \"RMSE\": 2.9569384317632506, \"R2\": -1.5759385016835008, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.165202 }, { \"step\": 380, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6987131960417108, \"RMSE\": 2.8893997308323693, \"R2\": -1.5446951110541192, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.179547 }, { \"step\": 400, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.673610627740774, \"RMSE\": 2.82935583501861, \"R2\": -1.5089937655143242, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.194363 }, { \"step\": 420, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6410137122925974, \"RMSE\": 2.7701802079251965, \"R2\": -1.484737486096575, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.209602 }, { \"step\": 440, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6565972573555454, \"RMSE\": 2.7427790467379385, \"R2\": -1.391750010744973, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.225268 }, { \"step\": 460, \"track\": \"Regression\", 
\"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.699464840115161, \"RMSE\": 2.73946740401384, \"R2\": -1.2626191030939884, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.241356 }, { \"step\": 480, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7224824441896145, \"RMSE\": 2.7219018737730583, \"R2\": -1.182307732575659, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.257864 }, { \"step\": 500, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7446092142173422, \"RMSE\": 2.70580354422956, \"R2\": -1.1113262021905803, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.274789 }, { \"step\": 520, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7464998751860934, \"RMSE\": 2.677192702589883, \"R2\": -1.0705208906620065, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.292197 }, { \"step\": 540, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7535492786865423, \"RMSE\": 2.653885630983747, \"R2\": -1.027170706279252, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.310025 }, { \"step\": 560, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7201019899937544, \"RMSE\": 2.614359234374483, \"R2\": -1.0141103337708768, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.328271 }, { \"step\": 580, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6887559504032663, \"RMSE\": 2.5757257291728384, \"R2\": -1.0033760803823184, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.346933 }, { \"step\": 600, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.701917368353294, \"RMSE\": 2.561424763732869, \"R2\": -0.9592753712060648, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.366058 }, { \"step\": 620, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7178157166185173, \"RMSE\": 2.551346895968156, \"R2\": -0.9142580419512064, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.38561 }, { \"step\": 640, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7365901196485038, \"RMSE\": 2.545046385321895, \"R2\": -0.8692105635365064, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.405582 }, { \"step\": 660, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.7465677425181807, \"RMSE\": 2.532051562790666, \"R2\": -0.8368676529707118, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.425968 }, { \"step\": 680, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.731617734826669, \"RMSE\": 2.504226186170861, \"R2\": -0.8251107974736909, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.446783 }, { \"step\": 700, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6973720107412231, \"RMSE\": 2.47026789197972, \"R2\": -0.8225927549994396, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.468073 }, { \"step\": 720, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.6698372433333928, \"RMSE\": 2.4400355004771077, \"R2\": -0.81732226470892, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.489788 }, { \"step\": 740, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6732482399922957, \"RMSE\": 2.425592833263792, \"R2\": -0.7947920429290933, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.511921 }, { \"step\": 760, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6653913599894004, \"RMSE\": 2.404136439714782, \"R2\": -0.7822814452716051, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.53447 }, { \"step\": 780, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6644612180457288, \"RMSE\": 2.387561393188575, \"R2\": -0.7656652158374817, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.557436 }, { \"step\": 800, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6556359332933146, \"RMSE\": 2.368497267913513, \"R2\": -0.7532954885990883, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.580851 }, { \"step\": 820, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6452077788467467, \"RMSE\": 2.348678653798561, \"R2\": -0.7430103139622937, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.604682 }, { \"step\": 840, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6374623223784903, \"RMSE\": 2.3305035344735936, \"R2\": -0.7320713255917544, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.6289220000000001 }, { \"step\": 860, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6419505315856449, \"RMSE\": 2.320208013716276, \"R2\": -0.7138439732116804, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.653572 }, { \"step\": 880, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6490002164922652, \"RMSE\": 2.3126155324510744, \"R2\": -0.6941855677649247, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.6786340000000001 }, { \"step\": 900, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6474991175923384, \"RMSE\": 2.299197536504521, \"R2\": -0.6816400531907807, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.704114 }, { \"step\": 920, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6301006788336792, \"RMSE\": 2.2779225390149764, \"R2\": -0.6777843948800273, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.730046 }, { \"step\": 940, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6221876471839871, \"RMSE\": 2.262378737250057, \"R2\": -0.6690049120995847, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.7563869999999999 }, { \"step\": 960, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.6124120493571743, \"RMSE\": 2.245866476718547, \"R2\": -0.6619276404267609, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.7831509999999999 }, { \"step\": 980, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", 
\"dataset\": \"TrumpApproval\", \"MAE\": 1.5867001120604314, \"RMSE\": 2.223758235975506, \"R2\": -0.661013659831075, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.810322 }, { \"step\": 1000, \"track\": \"Regression\", \"model\": \"[baseline] Mean predictor\", \"dataset\": \"TrumpApproval\", \"MAE\": 1.5681359363812415, \"RMSE\": 2.2037391763141216, \"R2\": -0.6587014308970958, \"Memory in Mb\": 0.0004901885986328, \"Time in s\": 0.8379 } ] }, \"params\": [ { \"name\": \"models\", \"select\": { \"type\": \"point\", \"fields\": [ \"model\" ] }, \"bind\": \"legend\" }, { \"name\": \"Dataset\", \"value\": \"ChickWeights\", \"bind\": { \"input\": \"select\", \"options\": [ \"ChickWeights\", \"TrumpApproval\" ] } }, { \"name\": \"grid\", \"select\": \"interval\", \"bind\": \"scales\" } ], \"transform\": [ { \"filter\": { \"field\": \"dataset\", \"equal\": { \"expr\": \"Dataset\" } } } ], \"repeat\": { \"row\": [ \"MAE\", \"RMSE\", \"R2\", \"Memory in Mb\", \"Time in s\" ] }, \"spec\": { \"width\": \"container\", \"mark\": \"line\", \"encoding\": { \"x\": { \"field\": \"step\", \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"title\": \"Instance\" } }, \"y\": { \"field\": { \"repeat\": \"row\" }, \"type\": \"quantitative\", \"axis\": { \"titleFontSize\": 18, \"labelFontSize\": 18 } }, \"color\": { \"field\": \"model\", \"type\": \"ordinal\", \"scale\": { \"scheme\": \"category20b\" }, \"title\": \"Models\", \"legend\": { \"titleFontSize\": 18, \"labelFontSize\": 18, \"labelLimit\": 500 } }, \"opacity\": { \"condition\": { \"param\": \"models\", \"value\": 1 }, \"value\": 0.2 } } } }

    "},{"location":"benchmarks/Regression/#datasets","title":"Datasets","text":"ChickWeights

    Chick weights over time.

    The stream contains 578 items and 3 features. The goal is to predict the weight of each chick over time, according to the diet the chick is on. The data is ordered by time and then by chick.

    Name      ChickWeights
    Task      Regression
    Samples   578
    Features  3
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/chick-weights.csv

    TrumpApproval

    Donald Trump approval ratings.

    This dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald Trump's approval ratings. It contains 6 features, 5 of which are approval ratings collected by 5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of this task is to see if we can reproduce FiveThirtyEight's model.

    Name      TrumpApproval
    Task      Regression
    Samples   1,001
    Features  6
    Sparse    False
    Path      /home/kulbach/projects/river/river/datasets/trump_approval.csv.gz
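    Both datasets ship with River. As a minimal sketch (assuming River is installed), each dataset is an iterable of (features, target) pairs:

    from river import datasets

    # Peek at the first observation of each benchmark dataset
    for dataset in (datasets.ChickWeights(), datasets.TrumpApproval()):
        x, y = next(iter(dataset))
        print(type(dataset).__name__, y)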

    "},{"location":"benchmarks/Regression/#models","title":"Models","text":"Linear Regression

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)
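    For reference, a pipeline matching the configuration printed above can be assembled as follows; this is a sketch of the equivalent construction, not the benchmark script itself:

    from river import linear_model, optim, preprocessing

    # StandardScaler feeding an SGD-trained linear regression, as printed above
    model = (
        preprocessing.StandardScaler()
        | linear_model.LinearRegression(optimizer=optim.SGD(lr=0.01))
    )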

    Linear Regression with l1 regularization

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=1.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)

    Linear Regression with l2 regularization

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=1.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)

    Passive-Aggressive Regressor, mode 1

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  PARegressor (\n    C=1.\n    mode=1\n    eps=0.1\n    learn_intercept=True\n  )\n)

    Passive-Aggressive Regressor, mode 2

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  PARegressor (\n    C=1.\n    mode=2\n    eps=0.1\n    learn_intercept=True\n  )\n)

    k-Nearest Neighbors

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  KNNRegressor (\n    n_neighbors=5\n    window_size=100\n    aggregation_method=\"mean\"\n    min_distance_keep=0.\n    distance_func=functools.partial(, p=2)\n  )\n)\n

    Hoeffding Tree

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  HoeffdingTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n  )\n)

    Hoeffding Adaptive Tree

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=True\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=42\n  )\n)

    Stochastic Gradient Tree

    SGTRegressor (\n  delta=1e-07\n  grace_period=200\n  init_pred=0.\n  max_depth=inf\n  lambda_value=0.1\n  gamma=1.\n  nominal_attributes=[]\n  feature_quantizer=StaticQuantizer (\n    n_bins=64\n    warm_start=100\n    buckets=None\n  )\n)

    Adaptive Random Forest

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  []\n)

    Adaptive Model Rules

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  AMRules (\n    n_min=200\n    delta=1e-07\n    tau=0.05\n    pred_type=\"adaptive\"\n    pred_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    fading_factor=0.99\n    anomaly_threshold=-0.75\n    m_min=30\n    ordered_rule_set=True\n    min_samples_split=5\n  )\n)

    Streaming Random Patches

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  SRPRegressor (\n    model=HoeffdingTreeRegressor (\n      grace_period=50\n      max_depth=inf\n      delta=0.01\n      tau=0.05\n      leaf_prediction=\"adaptive\"\n      leaf_model=LinearRegression (\n        optimizer=SGD (\n          lr=Constant (\n            learning_rate=0.01\n          )\n        )\n        loss=Squared ()\n        l2=0.\n        l1=0.\n        intercept_init=0.\n        intercept_lr=Constant (\n          learning_rate=0.01\n        )\n        clip_gradient=1e+12\n        initializer=Zeros ()\n      )\n      model_selector_decay=0.95\n      nominal_attributes=None\n      splitter=TEBSTSplitter (\n        digits=1\n      )\n      min_samples_split=5\n      binary_split=False\n      max_size=500.\n      memory_estimate_period=1000000\n      stop_mem_management=False\n      remove_poor_attrs=False\n      merit_preprune=True\n    )\n    n_models=10\n    subspace_size=0.6\n    training_method=\"patches\"\n    lam=6\n    drift_detector=ADWIN (\n      delta=1e-05\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    warning_detector=ADWIN (\n      delta=0.0001\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    disable_detector=\"off\"\n    disable_weighted_vote=True\n    drift_detection_criteria=\"error\"\n    aggregation_method=\"mean\"\n    seed=42\n    metric=MAE ()\n  )\n)

    Bagging

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  [HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        
learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n   
 binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=False\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  )]\n)

    Exponentially Weighted Average

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  [LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.01\n      )\n    )\n    loss=Squared ()\n    l2=0.\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  ), HoeffdingAdaptiveTreeRegressor (\n    grace_period=200\n    max_depth=inf\n    delta=1e-07\n    tau=0.05\n    leaf_prediction=\"adaptive\"\n    leaf_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    model_selector_decay=0.95\n    nominal_attributes=None\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    min_samples_split=5\n    bootstrap_sampling=True\n    drift_window_threshold=300\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    switch_significance=0.05\n    binary_split=False\n    max_size=500.\n    memory_estimate_period=1000000\n    stop_mem_management=False\n    remove_poor_attrs=False\n    merit_preprune=True\n    seed=None\n  ), KNNRegressor (\n    n_neighbors=5\n    window_size=100\n    aggregation_method=\"mean\"\n    min_distance_keep=0.\n    distance_func=functools.partial(, p=2)\n  ), AMRules (\n    n_min=200\n    delta=1e-07\n    tau=0.05\n    pred_type=\"adaptive\"\n    pred_model=LinearRegression (\n      optimizer=SGD (\n        lr=Constant (\n          learning_rate=0.01\n        )\n      )\n      loss=Squared ()\n      l2=0.\n      l1=0.\n      intercept_init=0.\n      intercept_lr=Constant (\n        learning_rate=0.01\n      )\n      clip_gradient=1e+12\n      initializer=Zeros ()\n    )\n    splitter=TEBSTSplitter (\n      digits=1\n    )\n    drift_detector=ADWIN (\n      delta=0.002\n      clock=32\n      max_buckets=5\n      min_window_length=5\n      grace_period=10\n    )\n    fading_factor=0.99\n    anomaly_threshold=-0.75\n    m_min=30\n    ordered_rule_set=True\n    min_samples_split=5\n  )]\n)\n

    River MLP

    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  MLPRegressor (\n    hidden_dims=(5,)\n    activations=(, , )\n    loss=Squared ()\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.001\n      )\n    )\n    seed=42\n  )\n)\n

    [baseline] Mean predictor

    StatisticRegressor (\n  statistic=Mean ()\n)
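    This baseline simply predicts the running mean of all targets seen so far. A sketch of the equivalent construction, assuming River's dummy and stats modules:

    from river import dummy, stats

    # Regressor that always predicts the current running mean of y
    baseline = dummy.StatisticRegressor(stats.Mean())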


    "},{"location":"benchmarks/Regression/#environment","title":"Environment","text":"
    Python implementation: CPython
    Python version       : 3.11.5
    IPython version      : 8.15.0

    river       : 0.19.0
    numpy       : 1.26.0
    scikit-learn: 1.3.0
    pandas      : 2.1.0
    scipy       : 1.11.2

    Compiler    : GCC 11.4.0
    OS          : Linux
    Release     : 6.2.0-1011-azure
    Machine     : x86_64
    Processor   : x86_64
    CPU cores   : 2
    Architecture: 64bit
    "},{"location":"examples/batch-to-online/","title":"From batch to online/stream","text":""},{"location":"examples/batch-to-online/#a-quick-overview-of-batch-learning","title":"A quick overview of batch learning","text":"

    If you've already delved into machine learning, then you shouldn't have any difficulty getting to grips with incremental learning. If you are somewhat new to machine learning, then do not worry! The point of this notebook is to introduce some simple notions. We'll also start to show how River fits in, and explain how to use it.

    The whole point of machine learning is to learn from data. In supervised learning you want to learn how to predict a target \(y\) given a set of features \(X\). Meanwhile in unsupervised learning there is no target, and the goal is rather to identify patterns and trends in the features \(X\). At this point most people tend to imagine \(X\) as a somewhat big table where each row is an observation and each column is a feature, and they would be quite right. Learning from tabular data is part of what's called batch learning, which basically means that all of the data is available to our learning algorithm at once. Multiple libraries have been created to handle the batch learning regime, with one of the most prominent being Python's scikit-learn.

    As a simple example of batch learning let's say we want to learn to predict if a woman has breast cancer or not. We'll use the breast cancer dataset available with scikit-learn. We'll learn to map a set of features to a binary decision using a logistic regression. Like many other models based on numerical weights, logistic regression is sensitive to the scale of the features. Rescaling the data so that each feature has mean 0 and variance 1 is generally considered good practice. We can apply the rescaling and fit the logistic regression sequentially in an elegant manner using a Pipeline. To measure the performance of the model we'll evaluate the average ROC AUC score using 5-fold cross-validation.

    from sklearn import datasets
    from sklearn import linear_model
    from sklearn import metrics
    from sklearn import model_selection
    from sklearn import pipeline
    from sklearn import preprocessing


    # Load the data
    dataset = datasets.load_breast_cancer()
    X, y = dataset.data, dataset.target

    # Define the steps of the model
    model = pipeline.Pipeline([
        ('scale', preprocessing.StandardScaler()),
        ('lin_reg', linear_model.LogisticRegression(solver='lbfgs'))
    ])

    # Define a deterministic cross-validation procedure
    cv = model_selection.KFold(n_splits=5, shuffle=True, random_state=42)

    # Compute the ROC AUC scores
    scorer = metrics.make_scorer(metrics.roc_auc_score)
    scores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)

    # Display the average score and its standard deviation
    print(f'ROC AUC: {scores.mean():.3f} (± {scores.std():.3f})')
    ROC AUC: 0.975 (± 0.011)

    This might be a lot to take in if you're not accustomed to scikit-learn, but it probably isn't if you are. Batch learning basically boils down to:

    1. Loading (and preprocessing) the data
    2. Fitting a model to the data
    3. Computing the performance of the model on unseen data

    This is pretty standard and is maybe how most people imagine a machine learning pipeline. However, this way of proceeding has certain downsides. First of all your laptop would crash if the load_breast_cancer function returned a dataset whose size exceeded your available amount of RAM. Sometimes you can use some tricks to get around this. For example, by optimizing the data types and by using sparse representations when applicable you can potentially save precious gigabytes of RAM. However, like many tricks this only goes so far. If your dataset weighs hundreds of gigabytes then you won't get far without some special hardware. One solution is to do out-of-core learning; that is, to use algorithms that can learn by being presented the data in chunks or mini-batches. If you want to go down this road then take a look at Dask and Spark's MLlib.
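    To make the mini-batch idea concrete, here is a minimal sketch of out-of-core learning using scikit-learn's partial_fit; the file big.csv and its binary target column y are hypothetical:

    import pandas as pd
    from sklearn import linear_model

    model = linear_model.SGDClassifier(loss='log_loss')

    # Stream the file in chunks that fit in RAM, updating the model as we go
    for chunk in pd.read_csv('big.csv', chunksize=10_000):
        X_chunk = chunk.drop(columns='y')
        y_chunk = chunk['y']
        model.partial_fit(X_chunk, y_chunk, classes=[0, 1])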

    Another issue with the batch learning regime is that it can't elegantly learn from new data. Indeed, if new data is made available, then the model has to be retrained from scratch on a new dataset composed of the old data and the new data. This is particularly annoying in a real situation where you might have new incoming data every week, day, hour, minute, or even second. For example, if you're building a recommendation engine for an e-commerce app, then you're probably retraining your model from scratch every week or so. As your app grows in popularity, so does the dataset you're training on. This will lead to longer and longer training times, and might eventually require a hardware upgrade.

    A final downside that isn't very easy to grasp concerns the manner in which features are extracted. Every time you want to train your model you first have to extract features. The catch is that some features might not be accessible at the particular point in time you are at. For example, maybe some attributes in your data warehouse get overwritten with time. In other words, maybe not all the features pertaining to a particular observation are available anymore, whereas they were a week ago. This happens more often than not in real scenarios, and unless you have a sophisticated data engineering pipeline you will encounter these issues at some point.

    "},{"location":"examples/batch-to-online/#a-hands-on-introduction-to-incremental-learning","title":"A hands-on introduction to incremental learning","text":"

    Incremental learning is also often called online learning or stream learning, but if you google online learning a lot of the results will point to educational websites. Hence, the terms \"incremental learning\" and \"stream learning\" (from which River derives its name) are preferred. The point of incremental learning is to fit a model to a stream of data. In other words, the data isn't available in its entirety; rather, the observations are provided one by one. As an example, let's stream through the dataset used previously.

    for xi, yi in zip(X, y):\n    # This is where the model learns\n    pass\n

    In this case we're iterating over a dataset that is already in memory, but we could just as well stream from a CSV file, a Kafka stream, an SQL query, etc. If we look at xi we can notice that it is a numpy.ndarray.
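    For instance, here is a hedged sketch of what streaming from a CSV file could look like with River's stream.iter_csv function; the file name and the converters are assumptions made for the sake of illustration.

    from river import stream

    params = {
        'target': 'target',                 # name of the target column
        'converters': {'mean area': float}  # parse this column as a float
    }

    # 'breast_cancer.csv' is a hypothetical file with one row per observation
    for xi, yi in stream.iter_csv('breast_cancer.csv', **params):
        pass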

    xi\n
    array([7.760e+00, 2.454e+01, 4.792e+01, 1.810e+02, 5.263e-02, 4.362e-02,\n       0.000e+00, 0.000e+00, 1.587e-01, 5.884e-02, 3.857e-01, 1.428e+00,\n       2.548e+00, 1.915e+01, 7.189e-03, 4.660e-03, 0.000e+00, 0.000e+00,\n       2.676e-02, 2.783e-03, 9.456e+00, 3.037e+01, 5.916e+01, 2.686e+02,\n       8.996e-02, 6.444e-02, 0.000e+00, 0.000e+00, 2.871e-01, 7.039e-02])\n

    River by design works with dicts. We believe that dicts are more enjoyable to program with than numpy.ndarrays, at least when single observations are concerned. dicts bring the added benefit that each feature can be accessed by name rather than by position.

    for xi, yi in zip(X, y):\n    xi = dict(zip(dataset.feature_names, xi))\n    pass\n\nxi\n
    {'mean radius': 7.76,\n 'mean texture': 24.54,\n 'mean perimeter': 47.92,\n 'mean area': 181.0,\n 'mean smoothness': 0.05263,\n 'mean compactness': 0.04362,\n 'mean concavity': 0.0,\n 'mean concave points': 0.0,\n 'mean symmetry': 0.1587,\n 'mean fractal dimension': 0.05884,\n 'radius error': 0.3857,\n 'texture error': 1.428,\n 'perimeter error': 2.548,\n 'area error': 19.15,\n 'smoothness error': 0.007189,\n 'compactness error': 0.00466,\n 'concavity error': 0.0,\n 'concave points error': 0.0,\n 'symmetry error': 0.02676,\n 'fractal dimension error': 0.002783,\n 'worst radius': 9.456,\n 'worst texture': 30.37,\n 'worst perimeter': 59.16,\n 'worst area': 268.6,\n 'worst smoothness': 0.08996,\n 'worst compactness': 0.06444,\n 'worst concavity': 0.0,\n 'worst concave points': 0.0,\n 'worst symmetry': 0.2871,\n 'worst fractal dimension': 0.07039}\n

    Conveniently, River's stream module has an iter_sklearn_dataset function that we can use instead.

    from river import stream\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    pass\n

    The simple fact that we are getting the data as a stream means that we can't do a lot of things the same way as in a batch setting. For example, let's say we want to scale the data so that it has mean 0 and variance 1, as we did earlier. To do so we simply have to subtract the mean of each feature from each value and then divide the result by the standard deviation of the feature. The problem is that we can't possibly know the values of the mean and the standard deviation before actually going through all the data! One way to proceed would be to do a first pass over the data to compute the necessary values and then scale the values during a second pass. The problem is that this defeats our purpose, which is to learn by only looking at the data once. Although this might seem rather restrictive, it reaps sizable benefits down the road.

    The way we do feature scaling in River involves computing running statistics (also known as moving statistics). The idea is that we use a data structure that estimates the mean and updates itself when it is provided with a value. The same goes for the variance (and thus the standard deviation). For example, if we denote \\(\\mu_t\\) the mean and \\(n_t\\) the count at any moment \\(t\\), then updating the mean can be done like so:

    \\[ \\begin{cases} n_{t+1} = n_t + 1 \\\\ \\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\end{cases} \\]

    Likewise, the running variance can be computed like so:

    \\[ \\begin{cases} n_{t+1} = n_t + 1 \\\\ \\mu_{t+1} = \\mu_t + \\frac{x - \\mu_t}{n_{t+1}} \\\\ s_{t+1} = s_t + (x - \\mu_t) \\times (x - \\mu_{t+1}) \\\\ \\sigma_{t+1}^2 = \\frac{s_{t+1}}{n_{t+1}} \\end{cases} \\]

    where \\(s_t\\) is a running sum of squares and \\(\\sigma_t^2\\) is the running variance at time \\(t\\). This might seem a tad more involved than the batch algorithms you learn in school, but it is rather elegant. Implementing this in Python is not too difficult. For example, let's compute the running mean and variance of the 'mean area' variable.

    n, mean, sum_of_squares, variance = 0, 0, 0, 0\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    n += 1\n    old_mean = mean\n    mean += (xi['mean area'] - mean) / n\n    sum_of_squares += (xi['mean area'] - old_mean) * (xi['mean area'] - mean)\n    variance = sum_of_squares / n\n\nprint(f'Running mean: {mean:.3f}')\nprint(f'Running variance: {variance:.3f}')\n
    Running mean: 654.889\nRunning variance: 123625.903\n

    Let's compare this with numpy. But remember, numpy requires access to \"all\" the data.

    import numpy as np\n\ni = list(dataset.feature_names).index('mean area')\nprint(f'True mean: {np.mean(X[:, i]):.3f}')\nprint(f'True variance: {np.var(X[:, i]):.3f}')\n
    True mean: 654.889\nTrue variance: 123625.903\n

    The results seem to be exactly the same! The twist is that the running statistics won't be very accurate for the first few observations. In general though this doesn't matter too much. Some would even go as far as to say that this discrepancy is beneficial and acts as some sort of regularization...

    Now the idea is that we can compute the running statistics of each feature and scale them as they come along. The way to do this with River is to use the StandardScaler class from the preprocessing module, like so:

    from river import preprocessing\n\nscaler = preprocessing.StandardScaler()\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer()):\n    scaler = scaler.learn_one(xi)\n

    Now that we are scaling the data, we can start doing some actual machine learning. We're going to tackle an online binary classification task with logistic regression. Because all the data isn't available at once, we are obliged to do what is called stochastic gradient descent (SGD), which is a popular research topic with a lot of variants. SGD is also commonly used to train neural networks. The idea is that at each step we compute the loss between the prediction and the ground truth. We then calculate the gradient, which is simply a set of derivatives of the loss with respect to each weight of the model. Once we have obtained the gradient, we can update the weights by moving them in the opposite direction of the gradient. The amount by which the weights are moved typically depends on a learning rate, which is typically set by the user. Different optimizers have different ways of managing the weight update, and some handle the learning rate implicitly. Online logistic regression can be done in River with the LogisticRegression class from the linear_model module. We'll be using plain and simple SGD via the SGD optimizer from the optim module. During training we'll store the predicted probabilities along with the ground truths, so as to measure the ROC AUC at the end.

    from river import linear_model\nfrom river import optim\n\nscaler = preprocessing.StandardScaler()\noptimizer = optim.SGD(lr=0.01)\nlog_reg = linear_model.LogisticRegression(optimizer)\n\ny_true = []\ny_pred = []\n\nfor xi, yi in stream.iter_sklearn_dataset(datasets.load_breast_cancer(), shuffle=True, seed=42):\n\n    # Scale the features\n    xi_scaled = scaler.learn_one(xi).transform_one(xi)\n\n    # Test the current model on the new \"unobserved\" sample\n    yi_pred = log_reg.predict_proba_one(xi_scaled)\n    # Train the model with the new sample\n    log_reg.learn_one(xi_scaled, yi)\n\n    # Store the truth and the prediction\n    y_true.append(yi)\n    y_pred.append(yi_pred[True])\n\nprint(f'ROC AUC: {metrics.roc_auc_score(y_true, y_pred):.3f}')\n
    ROC AUC: 0.990\n

    The ROC AUC is significantly better than the one obtained from the cross-validation of scikit-learn's logistic regression. However, to make things really comparable it would be nice to compare with the same cross-validation procedure. River has a compat module that contains utilities for making River compatible with other Python libraries. Because we're doing classification, we'll be using the convert_river_to_sklearn function. We'll also be using a Pipeline to encapsulate the logic of the StandardScaler and the LogisticRegression in one single object.

    from river import compat\nfrom river import compose\n\n# We define a Pipeline, exactly like we did earlier for sklearn\nmodel = compose.Pipeline(\n    ('scale', preprocessing.StandardScaler()),\n    ('log_reg', linear_model.LogisticRegression())\n)\n\n# We make the Pipeline compatible with sklearn\nmodel = compat.convert_river_to_sklearn(model)\n\n# We compute the CV scores using the same CV scheme and the same scoring\nscores = model_selection.cross_val_score(model, X, y, scoring=scorer, cv=cv)\n\n# Display the average score and its standard deviation\nprint(f'ROC AUC: {scores.mean():.3f} (\u00b1 {scores.std():.3f})')\n
    ROC AUC: 0.964 (\u00b1 0.016)\n

    This time the ROC AUC score is lower, which is what we would expect. Indeed, online learning isn't as accurate as batch learning. However, it all depends on what you're interested in. If you're only interested in predicting the next observation then the online learning regime would be better. That's why it's a bit hard to compare both approaches: they're suited to different scenarios.
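    As an aside, when the streaming scenario is the one you care about, River's evaluate module provides progressive validation out of the box. The following sketch mirrors the manual predict-then-learn loop we wrote above:

    from sklearn import datasets
    from river import compose
    from river import evaluate
    from river import linear_model
    from river import metrics
    from river import preprocessing
    from river import stream

    model = compose.Pipeline(
        ('scale', preprocessing.StandardScaler()),
        ('log_reg', linear_model.LogisticRegression())
    )

    # Each observation is used to make a prediction before being learned from
    evaluate.progressive_val_score(
        dataset=stream.iter_sklearn_dataset(datasets.load_breast_cancer()),
        model=model,
        metric=metrics.ROCAUC()
    )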

    "},{"location":"examples/batch-to-online/#going-further","title":"Going further","text":"

    Here are a few resources if you want to do some reading:

    • Online learning -- Wikipedia
    • What is online machine learning? -- Max Pagels
    • Introduction to Online Learning -- USC course
    • Online Methods in Machine Learning -- MIT course
    • Online Learning: A Comprehensive Survey
    • Streaming 101: The world beyond batch
    • Machine learning for data streams
    • Data Stream Mining: A Practical Approach
    "},{"location":"examples/bike-sharing-forecasting/","title":"Bike-sharing forecasting","text":"

    In this tutorial we're going to forecast the number of bikes in 5 bike stations from the city of Toulouse. We'll do so by building a simple model step by step. The dataset contains 182,470 observations. Let's first take a peek at the data.

    from pprint import pprint\nfrom river import datasets\n\ndataset = datasets.Bikes()\n\nfor x, y in dataset:\n    pprint(x)\n    print(f'Number of available bikes: {y}')\n    break\n
    {'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'pressure': 1017.0,\n 'station': 'metro-canal-du-midi',\n 'temperature': 6.54,\n 'wind': 9.3}\nNumber of available bikes: 1\n

    Let's start by using a simple linear regression on the numeric features. We can select the numeric features and discard the rest of the features using a Select. Linear regression is very likely to go haywire if we don't scale the data, so we'll use a StandardScaler to do just that. We'll evaluate the model by measuring the mean absolute error. Finally we'll print the score every 20,000 observations.

    from river import compose\nfrom river import linear_model\nfrom river import metrics\nfrom river import evaluate\nfrom river import preprocessing\nfrom river import optim\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression(optimizer=optim.SGD(0.001))\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric, print_every=20_000)\n
    [20,000] MAE: 4.912763\n[40,000] MAE: 5.333578\n[60,000] MAE: 5.330969\n[80,000] MAE: 5.392334\n[100,000] MAE: 5.423078\n[120,000] MAE: 5.541239\n[140,000] MAE: 5.613038\n[160,000] MAE: 5.622441\n[180,000] MAE: 5.567836\n[182,470] MAE: 5.563905\n\n\n\n\n\nMAE: 5.563905\n

    The model doesn't seem to be doing that well, but then again we didn't provide a lot of features. Generally, a good idea for this kind of problem is to look at an average of the previous values. For example, for each station we can look at the average number of bikes per hour. To do so we first have to extract the hour from the moment field. We can then use a TargetAgg to aggregate the values of the target.

    from river import feature_extraction\nfrom river import stats\n\ndef get_hour(x):\n    x['hour'] = x['moment'].hour\n    return x\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel += (\n    get_hour |\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression(optimizer=optim.SGD(0.001))\n\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric, print_every=20_000)\n
    [20,000] MAE: 3.720766\n[40,000] MAE: 3.829739\n[60,000] MAE: 3.844905\n[80,000] MAE: 3.910137\n[100,000] MAE: 3.888553\n[120,000] MAE: 3.923644\n[140,000] MAE: 3.980882\n[160,000] MAE: 3.949972\n[180,000] MAE: 3.934489\n[182,470] MAE: 3.933442\n\n\n\n\n\nMAE: 3.933442\n

    By adding a single feature, we've managed to significantly reduce the mean absolute error. At this point you might think that the model is getting slightly complex, and is difficult to understand and test. Pipelines have the advantage of being terse, but they aren't always easy to debug. Thankfully River has some ways to relieve the pain.

    The first thing we can do is to visualize the pipeline, to get an idea of how the data flows through it.

    model\n
    Select
    Select ( clouds humidity pressure temperature wind )
    get_hour
    def get_hour(x): x['hour'] = x['moment'].hour return x
    y_mean_by_station_and_hour
    TargetAgg ( by=['station', 'hour'] how=Mean () target_name=\"y\" )
    StandardScaler
    StandardScaler ( with_std=True )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.001 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    We can also use the debug_one method to see what happens to one particular instance. Let's train the model on the first 10,000 observations and then call debug_one on a single observation. To do this, we will turn the Bikes dataset into a Python iterator with the iter() function. The Pythonic way to read the first 10,000 elements of an iterator is to use itertools.islice.

    import itertools\n\nmodel = compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind')\nmodel += (\n    get_hour |\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nfor x, y in itertools.islice(dataset, 10000):\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n\nx, y = next(iter(dataset))\nprint(model.debug_one(x))\n
    0. Input\n--------\nclouds: 75 (int)\ndescription: light rain (str)\nhumidity: 81 (int)\nmoment: 2016-04-01 00:00:07 (datetime)\npressure: 1,017.00000 (float)\nstation: metro-canal-du-midi (str)\ntemperature: 6.54000 (float)\nwind: 9.30000 (float)\n\n1. Transformer union\n--------------------\n    1.0 Select\n    ----------\n    clouds: 75 (int)\n    humidity: 81 (int)\n    pressure: 1,017.00000 (float)\n    temperature: 6.54000 (float)\n    wind: 9.30000 (float)\n\n    1.1 get_hour | y_mean_by_station_and_hour\n    -----------------------------------------\n    y_mean_by_station_and_hour: 4.43243 (float)\n\nclouds: 75 (int)\nhumidity: 81 (int)\npressure: 1,017.00000 (float)\ntemperature: 6.54000 (float)\nwind: 9.30000 (float)\ny_mean_by_station_and_hour: 4.43243 (float)\n\n2. StandardScaler\n-----------------\nclouds: 0.47566 (float)\nhumidity: 0.42247 (float)\npressure: 1.05314 (float)\ntemperature: -1.22098 (float)\nwind: 2.21104 (float)\ny_mean_by_station_and_hour: -0.59098 (float)\n\n3. LinearRegression\n-------------------\nName      Value     Weight    Contribution  \nIntercept  1.00000   6.58252       6.58252  \npressure   1.05314   3.78529       3.98646  \nhumidity   0.42247   1.44921       0.61225  \ny_mean_by_station_and_hour -0.59098   0.54167      -0.32011  \n clouds    0.47566  -1.92255      -0.91448  \n   wind    2.21104  -0.77720      -1.71843  \ntemperature -1.22098   2.47030      -3.01619\n\nPrediction: 5.21201\n

    The debug_one method shows what happens to an input set of features, step by step.

    And now comes the catch. Up until now we've been using the progressive_val_score method from the evaluate module. What it does is sequentially predict the output of an observation and update the model immediately afterwards. This way of proceeding is often used for evaluating online learning models. But in some cases it is the wrong approach.

    When evaluating a machine learning model, the goal is to simulate production conditions in order to get a trustworthy assessment of the performance of the model. In our case, we typically want to forecast the number of bikes available in a station, say, 30 minutes ahead. Then, once the 30 minutes have passed, the true number of available bikes will be known and we will be able to update the model using the features available 30 minutes ago.

    What we really want is to evaluate the model by forecasting 30 minutes ahead and only updating the model once the true values are available. This can be done using the moment and delay parameters in the progressive_val_score method. The idea is that each observation in the stream of the data is shown twice to the model: once for making a prediction, and once for updating the model when the true value is revealed. The moment parameter determines which variable should be used as a timestamp, while the delay parameter controls the duration to wait before revealing the true values to the model.

    import datetime as dt\n\nevaluate.progressive_val_score(\n    dataset=dataset,\n    model=model.clone(),\n    metric=metrics.MAE(),\n    moment='moment',\n    delay=dt.timedelta(minutes=30),\n    print_every=20_000\n)\n
    [20,000] MAE: 20.198137\n[40,000] MAE: 12.199763\n[60,000] MAE: 9.468279\n[80,000] MAE: 8.126625\n[100,000] MAE: 7.273133\n[120,000] MAE: 6.735469\n[140,000] MAE: 6.376704\n[160,000] MAE: 6.06156\n[180,000] MAE: 5.806744\n[182,470] MAE: 5.780772\n\n\n\n\n\nMAE: 5.780772\n

    The performance is a bit worse, which is to be expected. Indeed, the task is more difficult: the model is only shown the ground truth 30 minutes after making a prediction.

    The takeaway of this notebook is that the progressive_val_score method can be used to simulate a production scenario, and is thus extremely valuable.

    "},{"location":"examples/building-a-simple-nowcasting-model/","title":"Building a simple nowcasting model","text":"

    Nowcasting is a special case of forecasting. It simply consists in predicting the next value in a time series.

    We'll be using the international airline passenger data available from here. This particular dataset is included with River in the datasets module.

    from river import datasets\n\nfor x, y in datasets.AirlinePassengers():\n    print(x, y)\n    break\n
    {'month': datetime.datetime(1949, 1, 1, 0, 0)} 112\n

    The data is as simple as can be: it consists of a sequence of months and values representing the total number of international airline passengers per month. Our goal is going to be to predict the number of passengers for the next month at each step. Notice that because the dataset is small -- which is usually the case for time series -- we could just fit a model from scratch each month. However, for the sake of example we're going to train a single model online. Although the overall performance might be weaker, training a time series model online has the benefit of being scalable if, say, you have thousands of time series to manage.
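    As a rough illustration of that scalability argument, one could keep a separate online model per series in a dictionary and route each incoming observation to its own model. The make_model factory and the (series_id, x, y) triples are assumptions made for the sake of the sketch.

    import collections

    # Hypothetical: make_model() returns a fresh forecasting pipeline
    models = collections.defaultdict(make_model)

    def handle(series_id, x, y):
        # Each series gets its own independent online model
        model = models[series_id]
        y_pred = model.predict_one(x)
        model.learn_one(x, y)
        return y_pred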

    We'll start with a very simple model where the only feature will be the ordinal date of each month. This should be able to capture some of the underlying trend.

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}\n\n\nmodel = compose.Pipeline(\n    ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n

    We'll write a function to evaluate the model. It will go through each observation in the dataset and update the model as it goes. The prior predictions will be stored along with the true values and plotted together.

    from river import metrics\nfrom river import utils\nimport matplotlib.pyplot as plt\n\n\ndef evaluate_model(model): \n\n    metric = utils.Rolling(metrics.MAE(), 12)\n\n    dates = []\n    y_trues = []\n    y_preds = []\n\n    for x, y in datasets.AirlinePassengers():\n\n        # Obtain the prior prediction and update the model in one go\n        y_pred = model.predict_one(x)\n        model.learn_one(x, y)\n\n        # Update the error metric\n        metric.update(y, y_pred)\n\n        # Store the true value and the prediction\n        dates.append(x['month'])\n        y_trues.append(y)\n        y_preds.append(y_pred)\n\n    # Plot the results\n    fig, ax = plt.subplots(figsize=(10, 6))\n    ax.grid(alpha=0.75)\n    ax.plot(dates, y_trues, lw=3, color='#2ecc71', alpha=0.8, label='Ground truth')\n    ax.plot(dates, y_preds, lw=3, color='#e74c3c', alpha=0.8, label='Prediction')\n    ax.legend()\n    ax.set_title(metric)\n

    Let's evaluate our first model.

    evaluate_model(model)\n

    The model has captured a trend, but not the right one. Indeed, it thinks the trend is linear whereas we can visually see that the growth of the data increases with time. In other words, the second derivative of the series is positive. This is a well-known problem in time series forecasting and there are thus many ways to handle it; for example, by using a Box-Cox transform. However, we are going to do something a bit different, and instead linearly detrend the series using a TargetStandardScaler.

    from river import stats\n\n\nmodel = compose.Pipeline(\n    ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(intercept_lr=0)),\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    Now let's try and capture the monthly trend by one-hot encoding the month name.

    import calendar\n\n\ndef get_month(x):\n    return {\n        calendar.month_name[month]: month == x['month'].month\n        for month in range(1, 13)\n    }\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month', compose.FuncTransformer(get_month)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(intercept_lr=0))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    This seems pretty decent. We can take a look at the weights of the linear regression to get an idea of the importance of each feature.

    model.regressor['lin_reg'].weights\n
    {'January': -0.13808091575141299,\n 'February': -0.18716063793638954,\n 'March': -0.026469206216021102,\n 'April': -0.03500685108350436,\n 'May': -0.013638742192777328,\n 'June': 0.16194267303548826,\n 'July': 0.31995865445067634,\n 'August': 0.2810396556938982,\n 'September': 0.03834350518076595,\n 'October': -0.11655850082390988,\n 'November': -0.2663497734491209,\n 'December': -0.15396048501165746,\n 'ordinal_date': 1.0234863735122575}\n

    As could be expected the months of July and August have the highest weights because these are the months where people typically go on holiday abroad. The month of December has a low weight because this is a month of festivities in most of the Western world where people usually stay at home.

    Our model seems to understand which months are important, but it fails to see that the importance of each month grows multiplicatively as the years go on. In other words our model is too shy. We can fix this by increasing the learning rate of the LinearRegression's optimizer.

    from river import optim\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month', compose.FuncTransformer(get_month)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03)\n    ))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    This is starting to look good! Naturally in production we would tune the learning rate, ideally in real-time.

    Before finishing, we're going to introduce a cool feature extraction trick based on radial basis function kernels. The one-hot encoding we did on the month is a good idea, but if you think about it, it is a bit rigid. Indeed, the value of each feature is going to be 0 or 1, depending on the month of each observation. We're basically saying that the month of September is as distant to the month of August as it is to the month of March. Of course this isn't true, and it would be nice if our features reflected this. To do so we can simply calculate the distance between the month of each observation and all the months in the calendar. Instead of simply computing the distance linearly, we're going to use a so-called Gaussian radial basis function kernel. This is a bit of a mouthful but for us it boils down to a simple formula, which is:

    \\[d(i, j) = \\exp\\left(-\\frac{(i - j)^2}{2\\sigma^2}\\right)\\]

    Intuitively this computes a similarity between two months -- denoted by \\(i\\) and \\(j\\) -- which decreases the further apart they are from each other. The \\(\\sigma\\) parameter can be seen as a hyperparameter that can be tuned -- in the following snippet we'll simply drop it, which amounts to setting \\(2\\sigma^2 = 1\\). For instance, the similarity between August and September is then \\(e^{-1} \\approx 0.37\\), whereas between August and March it is \\(e^{-25} \\approx 0\\). The thing to take away is that this results in smoother predictions than when using a one-hot encoding scheme, which is often a desirable property. You can also see this trick in action in this nice presentation.

    import math\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('ordinal_date', compose.FuncTransformer(get_ordinal_date)),\n        ('month_distances', compose.FuncTransformer(get_month_distances)),\n    )),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03)\n    ))\n)\n\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    We've managed to get a good-looking prediction curve with a reasonably simple model. What's more, our model has the advantage of being interpretable and easy to debug. There is surely more juice to squeeze (e.g. tune the hyperparameters, use an ensemble model, etc.), but we'll leave that as an exercise to the reader.

    As a finishing touch we'll rewrite our pipeline using the | operator, which is called a \"pipe\".

    extract_features = compose.TransformerUnion(get_ordinal_date, get_month_distances)\n\nscale = preprocessing.StandardScaler()\n\nlearn = linear_model.LinearRegression(\n    intercept_lr=0,\n    optimizer=optim.SGD(0.03)\n)\n\nmodel = extract_features | scale | learn\nmodel = preprocessing.TargetStandardScaler(regressor=model)\n\nevaluate_model(model)\n

    model\n
    TargetStandardScaler
    TargetStandardScaler ( regressor=Pipeline ( steps=OrderedDict([('TransformerUnion', TransformerUnion ( FuncTransformer ( func=\"get_ordinal_date\" ), FuncTransformer ( func=\"get_month_distances\" ) )), ('StandardScaler', StandardScaler ( with_std=True )), ('LinearRegression', LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.03 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0 ) clip_gradient=1e+12 initializer=Zeros () ))]) ) )
    get_ordinal_date
    def get_ordinal_date(x): return {'ordinal_date': x['month'].toordinal()}
    get_month_distances
    def get_month_distances(x): return { calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2) for month in range(1, 13) }
    StandardScaler
    StandardScaler ( with_std=True )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.03 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0 ) clip_gradient=1e+12 initializer=Zeros () )

    "},{"location":"examples/content-personalization/","title":"Content personalization","text":"

    "},{"location":"examples/content-personalization/#without-context","title":"Without context","text":"

    This example takes inspiration from Vowpal Wabbit's excellent tutorial.

    Content personalization is about taking into account user preferences. It's a special case of recommender systems. Ideally, side-information should be taken into account in addition to the user. But we'll start with something simpler. We'll assume that each user has stable preferences that are independent of the context. We capture this by implementing a \"reward\" function.

    def get_reward(user, item, context):\n\n    time_of_day = context['time_of_day']\n\n    USER_LIKED_ARTICLE = 1\n    USER_DISLIKED_ARTICLE = 0\n\n    if user == 'Tom':\n        if time_of_day == 'morning' and item == 'politics':\n            return USER_LIKED_ARTICLE\n        elif time_of_day == 'afternoon' and item == 'music':\n            return USER_LIKED_ARTICLE\n        else:\n            return USER_DISLIKED_ARTICLE\n    elif user == 'Anna':\n        if time_of_day == 'morning' and item == 'sports':\n            return USER_LIKED_ARTICLE\n        elif time_of_day == 'afternoon' and item == 'politics':\n            return USER_LIKED_ARTICLE\n        else:\n            return USER_DISLIKED_ARTICLE\n\nget_reward('Tom', 'politics', {'time_of_day': 'morning'})\n
    1\n

    Measuring the performance of a recommendation is not straightforward, mostly because of the interactive aspect of recommender systems. In a real situation, recommendations are presented to a user, and the user gives feedback indicating whether they like what they have been recommended or not. This feedback loop can't be captured entirely by a historical dataset. Some kind of simulator is required to generate recommendations and capture feedback. We already have a reward function. Now let's implement a simulation function.

    import random\nimport matplotlib.pyplot as plt\n\ndef plot_ctr(ctr):\n    plt.plot(range(1, len(ctr) + 1), ctr)\n    plt.xlabel('n_iterations', fontsize=14)\n    plt.ylabel('CTR', fontsize=14)\n    plt.ylim([0, 1])\n    plt.title(f'final CTR: {ctr[-1]:.2%}', fontsize=14)\n    plt.grid()\n\nusers = ['Tom', 'Anna']\ntimes_of_day = ['morning', 'afternoon']\nitems = {'politics', 'sports', 'music', 'food', 'finance', 'health', 'camping'}\n\ndef simulate(n, reward_func, model, seed):\n\n    rng = random.Random(seed)\n    n_clicks = 0\n    ctr = []  # click-through rate along time\n\n    for i in range(n):\n\n        # Generate a context at random\n        user = rng.choice(users)\n        context = {\n            'time_of_day': rng.choice(times_of_day)\n        }\n\n        # Make a single recommendation\n        item = model.rank(user, items=items, x=context)[0]\n\n        # Measure the reward\n        clicked = reward_func(user, item, context)\n        n_clicks += clicked\n        ctr.append(n_clicks / (i + 1))\n\n        # Update the model\n        model.learn_one(user, item, y=clicked, x=context)\n\n    plot_ctr(ctr)\n

    This simulation function does quite a few things. It can be seen as a simple reinforcement learning simulation. It samples a user, and then asks the model to provide a single recommendation. The user then gives feedback as to whether they liked the recommendation or not. Crucially, the user doesn't tell us what item they would have liked. If that were the case, we could model this as a multi-class classification problem.

    In this simulation the recommendations are generated greedily: each item is scored by the model and the items are ranked from the most preferred to the least preferred. The most preferred item is the one which gets recommended. But you could imagine all sorts of alternative ways to proceed.

    We can first evaluate a recommender which acts completely at random. It assigns a random preference to each item, regardless of the user.

    from river import reco\n\nmodel = reco.RandomNormal(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can see that the click-through rate (CTR) oscillates around 28.74%. In fact, this model is expected to be correct 100 * (2 / 7)% = 28.57% of the time. Indeed, each user likes two items, and there are seven items in total.

    Let's now use the Baseline recommender. This one models each preference as the following sum:

    \\[preference = \\bar{y} + b_{u} + b_{i}\\]

    where

    • \\(\\bar{y}\\) is the average CTR overall
    • \\(b_{u}\\) is the average CTR per user minus \\(\\bar{y}\\) -- it's therefore called a bias
    • \\(b_{i}\\) is the average CTR per item minus \\(\\bar{y}\\)

    This model is considered to be a baseline because it doesn't actually learn what items are preferred by each user. Instead it models each user and item separately. We shouldn't expect it to be a strong model. It should however do better than the random model used above.
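    To make the formula concrete, here is an illustrative sketch of a baseline preference computed from running averages. This is a simplification for exposition, not necessarily how River's Baseline is implemented.

    import collections

    class RunningMean:
        def __init__(self):
            self.n, self.mean = 0, 0.0
        def update(self, x):
            self.n += 1
            self.mean += (x - self.mean) / self.n

    y_bar = RunningMean()                            # global average CTR
    user_ctr = collections.defaultdict(RunningMean)  # average CTR per user
    item_ctr = collections.defaultdict(RunningMean)  # average CTR per item

    def preference(user, item):
        b_u = user_ctr[user].mean - y_bar.mean  # user bias
        b_i = item_ctr[item].mean - y_bar.mean  # item bias
        return y_bar.mean + b_u + b_i

    def learn(user, item, clicked):
        for stat in (y_bar, user_ctr[user], item_ctr[item]):
            stat.update(clicked)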

    model = reco.Baseline(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    This baseline model seems perfect, which is surprising. The reason why it works so well is that both users have in common that they like politics. The model therefore learns that 'politics' is a good item to recommend.

    model.i_biases\n
    defaultdict(Zeros (),\n            {'politics': 0.06389451550325113,\n             'music': -0.04041254194187752,\n             'finance': -0.040319730234734,\n             'camping': -0.03581829597317823,\n             'food': -0.037778771188204816,\n             'health': -0.04029646665611086,\n             'sports': -0.03661678982763635})\n

    The model is not as performant if we use a reward function where both users have different preferences.

    simulate(\n    5_000,\n    reward_func=lambda user, item, context: (\n        item in {'music', 'politics'} if user == \"Tom\" else\n        item in {'food', 'sports'}\n    ),\n    model=model,\n    seed=42\n)\n

    A good recommender model should at the very least understand what kind of items each user prefers. One of the simplest and yet most performant ways to do this is Simon Funk's SGD method, which he developed for the Netflix challenge and wrote about here. It models each user and each item as latent vectors. The dot product of these two vectors is the expected preference of the user for the item.
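    River's FunkMF implements this idea. Before using it, here is an illustrative sketch of the core mechanism, assuming a plain squared-error SGD update; it is not River's actual code.

    import random

    n_factors = 4
    rng = random.Random(0)
    user_latents, item_latents = {}, {}

    def latent(table, key):
        # Lazily initialize a small random latent vector
        if key not in table:
            table[key] = [rng.gauss(0, 0.1) for _ in range(n_factors)]
        return table[key]

    def predict(user, item):
        u, i = latent(user_latents, user), latent(item_latents, item)
        return sum(uk * ik for uk, ik in zip(u, i))

    def sgd_step(user, item, reward, lr=0.1):
        # Nudge both latent vectors so their dot product gets closer to the reward
        u, i = latent(user_latents, user), latent(item_latents, item)
        error = reward - predict(user, item)
        for k in range(n_factors):
            u[k], i[k] = u[k] + lr * error * i[k], i[k] + lr * error * u[k]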

    model = reco.FunkMF(seed=10)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can see that this model learns what items each user enjoys very well. Of course, there are some caveats. In our simulation, we ask the model to recommend the item most likely to be preferred for each user. Indeed, we rank all the items and pick the item at the top of the list. We do this many times for only two users.

    This is of course not realistic. Users will get fed up with recommendations if they're always shown the same item. It's important to include diversity into recommendations, and to let the model explore other options instead of always focusing on the item with the highest score. This is where evaluating recommender systems gets tricky: the reward function itself is difficult to model.
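    For the record, one simple way to inject such exploration is an epsilon-greedy scheme on top of the ranking: with a small probability, a random item is recommended instead of the top-ranked one. This is only a sketch, and the epsilon value is an arbitrary assumption.

    import random

    rng = random.Random(42)

    def recommend(model, user, items, context, epsilon=0.1):
        # With probability epsilon, explore by picking an item at random
        if rng.random() < epsilon:
            return rng.choice(sorted(items))
        # Otherwise exploit the model's top-ranked item
        return model.rank(user, items=items, x=context)[0]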

    We will keep ignoring these caveats in this notebook. Instead we will focus on a different concern: making recommendations when context is involved.

    "},{"location":"examples/content-personalization/#with-context","title":"With context","text":"

    We'll add some context by making it so that user preferences change depending on the time of day. Very simply, preferences might change from morning to afternoon. This is captured by the following reward function.

    times_of_day = ['morning', 'afternoon']\n\ndef get_reward(user, item, context):\n    if user == 'Tom':\n        if context['time_of_day'] == 'morning':\n            return item == 'politics'\n        if context['time_of_day'] == 'afternoon':\n            return item == 'music'\n    if user == 'Anna':\n        if context['time_of_day'] == 'morning':\n            return item == 'sports'\n        if context['time_of_day'] == 'afternoon':\n            return item == 'politics'\n

    We have to update our simulation function to generate a random context at each step. We also want our model to use it for recommending items as well as learning.

    def simulate(n, reward_func, model, seed):\n\n    rng = random.Random(seed)\n    n_clicks = 0\n    ctr = []\n\n    for i in range(n):\n\n        user = rng.choice(users)\n\n        # New: pass a context\n        context = {'time_of_day': rng.choice(times_of_day)}\n        item = model.rank(user, items, context)[0]\n\n        clicked = reward_func(user, item, context)\n        n_clicks += clicked\n        ctr.append(n_clicks / (i + 1))\n\n        # New: pass a context\n        model.learn_one(user, item, clicked, context)\n\n    plot_ctr(ctr)\n

    Not all models are capable of taking into account context. For instance, the FunkMF model only models users and items. It completely ignores the context, even when we provide one. All recommender models inherit from the base Recommender class. They also have a property which indicates whether or not they are able to handle context:

    model = reco.FunkMF(seed=10)\nmodel.is_contextual\n
    False\n

    Let's see how well it performs.

    simulate(5_000, get_reward, model, seed=42)\n

    The performance has roughly been halved. This is most likely because there are now two times of day, and if the model has learnt preferences for one time of the day, then it's expected to be wrong half of the time.

    Before delving into recsys models that can handle context, a simple hack is to notice that we can append the time of day to the user. This effectively results in new users which our model can distinguish between. We could apply this trick during the simulation, but we can also override the behavior of the learn_one and rank methods of our model.

    class FunkMFWithHack(reco.FunkMF):\n\n    def learn_one(self, user, item, reward, context):\n        user = f\"{user}@{context['time_of_day']}\"\n        return super().learn_one(user, item, reward, context)\n\n    def rank(self, user, items, context):\n        user = f\"{user}@{context['time_of_day']}\"\n        return super().rank(user, items, context)\n\nmodel = FunkMFWithHack(seed=29)\nsimulate(5_000, get_reward, model, seed=42)\n

    We can verify that the model has learnt the correct preferences by looking at the expected preference for each (user, item) pair.

    import pandas as pd\n\n(\n    pd.DataFrame(\n        {\n            'user': user,\n            'item': item,\n            'preference': model.predict_one(user, item)\n        }\n        for user in model.u_latents\n        for item in model.i_latents\n    )\n    .pivot(index='user', columns='item')\n    .style.highlight_max(color='lightgreen', axis='columns')\n)\n
                    preference
    item            camping    finance    food       health     music      politics   sports
    user
    Anna@afternoon  -0.059041  -0.018105   0.069222   0.032865   0.168353   1.000000   0.195960
    Anna@morning    -0.136399  -0.117577   0.076300   0.081131   0.154483   0.221890   1.000000
    Tom@afternoon   -0.233071   0.057220  -0.074671  -0.027115   1.000000   0.163607   0.141781
    Tom@morning     -0.050107  -0.028562   0.061163  -0.005428   0.063483   1.000000   0.125515

    "},{"location":"examples/debugging-a-pipeline/","title":"Debugging a pipeline","text":"

    River encourages users to make use of pipelines. The biggest pain point of pipelines is that it can be hard to understand what's happening to the data, especially when the pipeline is complex. Fortunately the Pipeline class has a debug_one method that can help out.

    Let's look at a fairly complex pipeline for predicting the number of bikes in 5 bike stations from the city of Toulouse. It doesn't matter if you understand the pipeline or not; the point of this notebook is to learn how to introspect a pipeline.

    import datetime as dt\nfrom river import compose\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\nfrom river import stream\n\n\nX_y = datasets.Bikes()\nX_y = stream.simulate_qa(X_y, moment='moment', delay=dt.timedelta(minutes=30))\n\ndef add_time_features(x):\n    return {\n        **x,\n        'hour': x['moment'].hour,\n        'day': x['moment'].weekday()\n    }\n\nmodel = add_time_features\nmodel |= (\n    compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind') +\n    feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean()) +\n    feature_extraction.TargetAgg(by='station', how=stats.EWMean())\n)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nmetric = metrics.MAE()\n\nquestions = {}\n\nfor i, x, y in X_y:\n    # Question\n    is_question = y is None\n    if is_question:\n        y_pred = model.predict_one(x)\n        questions[i] = y_pred\n\n    # Answer\n    else:\n        metric.update(y, questions[i])\n        model = model.learn_one(x, y)\n\n        if i >= 30000 and i % 30000 == 0:\n            print(i, metric)\n
    30000 MAE: 13.328051\n60000 MAE: 7.824087\n90000 MAE: 6.003909\n120000 MAE: 5.052855\n150000 MAE: 4.496826\n180000 MAE: 4.140702\n

    Let's start by looking at the pipeline. You can click each cell to display the current state for each step of the pipeline.

    model\n
    add_time_features
    def add_time_features(x): return { **x, 'hour': x['moment'].hour, 'day': x['moment'].weekday() }
    Select
    Select ( clouds humidity pressure temperature wind )
    y_mean_by_station_and_hour
    TargetAgg ( by=['station', 'hour'] how=Mean () target_name=\"y\" )
    y_ewm_0.5_by_station
    TargetAgg ( by=['station'] how=EWMean ( fading_factor=0.5 ) target_name=\"y\" )
    StandardScaler
    StandardScaler ( with_std=True )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    As mentioned above, the Pipeline class has a debug_one method. You can use this at any point you want to visualize what happens to an input x. For example, let's see what happens to the last seen x.

    print(model.debug_one(x))\n
    0. Input\n--------\nclouds: 88 (int)\ndescription: overcast clouds (str)\nhumidity: 84 (int)\nmoment: 2016-10-05 09:57:18 (datetime)\npressure: 1,017.34000 (float)\nstation: pomme (str)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\n\n1. add_time_features\n--------------------\nclouds: 88 (int)\nday: 2 (int)\ndescription: overcast clouds (str)\nhour: 9 (int)\nhumidity: 84 (int)\nmoment: 2016-10-05 09:57:18 (datetime)\npressure: 1,017.34000 (float)\nstation: pomme (str)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\n\n2. Transformer union\n--------------------\n    2.0 Select\n    ----------\n    clouds: 88 (int)\n    humidity: 84 (int)\n    pressure: 1,017.34000 (float)\n    temperature: 17.45000 (float)\n    wind: 1.95000 (float)\n\n    2.1 TargetAgg\n    -------------\n    y_mean_by_station_and_hour: 7.89396 (float)\n\n    2.2 TargetAgg1\n    --------------\n    y_ewm_0.5_by_station: 11.80372 (float)\n\nclouds: 88 (int)\nhumidity: 84 (int)\npressure: 1,017.34000 (float)\ntemperature: 17.45000 (float)\nwind: 1.95000 (float)\ny_ewm_0.5_by_station: 11.80372 (float)\ny_mean_by_station_and_hour: 7.89396 (float)\n\n3. StandardScaler\n-----------------\nclouds: 1.54778 (float)\nhumidity: 1.16366 (float)\npressure: 0.04916 (float)\ntemperature: -0.51938 (float)\nwind: -0.69426 (float)\ny_ewm_0.5_by_station: 0.19640 (float)\ny_mean_by_station_and_hour: -0.27110 (float)\n\n4. LinearRegression\n-------------------\nName       Value      Weight     Contribution  \nIntercept   1.00000    9.19960        9.19960  \ny_ewm_0.5_by_station  0.19640    9.19349        1.80562  \nhumidity    1.16366    1.01680        1.18320  \ntemperature -0.51938   -0.41575        0.21593  \n    wind   -0.69426   -0.03810        0.02645  \npressure    0.04916    0.18321        0.00901  \ny_mean_by_station_and_hour -0.27110    0.19553       -0.05301  \n  clouds    1.54778   -0.32838       -0.50827\n\nPrediction: 11.87854\n

    The pipeline does quite a few things, but using debug_one shows what happens step by step. This is really useful for checking that the pipeline is behaving as you're expecting it to. Remember that you can use debug_one whenever you wish, be it before, during, or after training a model.

    "},{"location":"examples/imbalanced-learning/","title":"Working with imbalanced data","text":"

    In machine learning it is quite common to have to deal with imbalanced datasets. This is particularly true in online learning for tasks such as fraud detection and spam classification. In these two cases, which are binary classification problems, there are usually many more 0s than 1s, which generally hinders the performance of the classifiers we throw at them.

    As an example we'll use the credit card dataset available in River. We'll first use a collections.Counter to count the number of 0s and 1s in order to get an idea of the class balance.

    import collections\nfrom river import datasets\n\nX_y = datasets.CreditCard()\n\ncounts = collections.Counter(y for _, y in X_y)\n\nfor c, count in counts.items():\n    print(f'{c}: {count} ({count / sum(counts.values()):.5%})')\n
    0: 284315 (99.82725%)\n1: 492 (0.17275%)\n
    "},{"location":"examples/imbalanced-learning/#baseline","title":"Baseline","text":"

    The dataset is quite unbalanced. For each 1 there are about 578 0s. Let's now train a logistic regression with default parameters and see how well it does. We'll measure the ROC AUC score.

    from river import linear_model\nfrom river import metrics\nfrom river import evaluate\nfrom river import preprocessing\n\n\nX_y = datasets.CreditCard()\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression()\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 89.11%\n
    "},{"location":"examples/imbalanced-learning/#importance-weighting","title":"Importance weighting","text":"

    The performance is already quite acceptable, but as we will now see we can do even better. The first thing we can do is to add weight to the 1s by using the weight_pos argument of the Log loss function.

    from river import optim\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(\n        loss=optim.losses.Log(weight_pos=5)\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.43%\n
    "},{"location":"examples/imbalanced-learning/#focal-loss","title":"Focal loss","text":"

    The deep learning community working on object detection has produced a special loss function for imbalanced learning called focal loss. We are doing binary classification, so we can plug the binary version of focal loss into our logistic regression and see how well it fares.
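    For reference, the binary focal loss introduced by Lin et al. (2017) down-weights well-classified examples so that the hard, rare ones dominate the gradient. If \\(p_t\\) denotes the probability predicted for the true class, it reads:

    \\[FL(p_t) = -\\alpha (1 - p_t)^{\\gamma} \\log(p_t)\\]

    where \\(\\gamma\\) controls how strongly easy examples are down-weighted and \\(\\alpha\\) weights the classes. The two arguments passed to BinaryFocalLoss below presumably play these roles, but check River's documentation to be sure.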

    model = (\n    preprocessing.StandardScaler() |\n    linear_model.LogisticRegression(loss=optim.losses.BinaryFocalLoss(2, 1))\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.31%\n
    "},{"location":"examples/imbalanced-learning/#under-sampling-the-majority-class","title":"Under-sampling the majority class","text":"

    Adding importance weights only works with gradient-based models (which includes neural networks). A more generic, and potentially more effective, approach is to use under-sampling and over-sampling. As an example, we'll under-sample the stream so that our logistic regression encounters 20% 1s and 80% 0s. Under-sampling has the additional benefit of requiring fewer training steps, and thus reduces the total training time.

    from river import imblearn\n\nmodel = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 94.75%\n

    The RandomUnderSampler class is a wrapper for classifiers. This is represented by a rectangle around the logistic regression bubble when we visualize the model.

    model\n
    StandardScaler
    StandardScaler ( with_std=True )
    RandomUnderSampler
    RandomUnderSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.8, 1: 0.2} seed=42 )
    LogisticRegression
    LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    "},{"location":"examples/imbalanced-learning/#over-sampling-the-minority-class","title":"Over-sampling the minority class","text":"

    We can also attain the same class distribution by over-sampling the minority class. This will come at the cost of having to train with more samples.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomOverSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 91.71%\n
    "},{"location":"examples/imbalanced-learning/#sampling-with-a-desired-sample-size","title":"Sampling with a desired sample size","text":"

    The downside of both RandomUnderSampler and RandomOverSampler is that you don't have any control over the amount of data the classifier trains on. The number of samples is adjusted so that the target distribution can be attained, either by under-sampling or by over-sampling. However, you can do both at the same time and choose how much data the classifier will see. To do so, we can use the RandomSampler class. In addition to the desired class distribution, we can specify how much data to train on. The samples will both be under-sampled and over-sampled in order to fit your constraints. This is powerful because it allows you to control both the class distribution and the size of the training data (and thus the training time). In the following example we'll set it up so that the model will train on 1 percent of the data.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomSampler(\n        classifier=linear_model.LogisticRegression(),\n        desired_dist={0: .8, 1: .2},\n        sampling_rate=.01,\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 94.71%\n
    "},{"location":"examples/imbalanced-learning/#hybrid-approach","title":"Hybrid approach","text":"

    As you might have guessed by now, nothing is stopping you from mixing imbalanced learning methods together. As an example, let's combine imblearn.RandomUnderSampler and the weight_pos parameter from the optim.losses.Log loss function.

    model = (\n    preprocessing.StandardScaler() |\n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(\n            loss=optim.losses.Log(weight_pos=5)\n        ),\n        desired_dist={0: .8, 1: .2},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(X_y, model, metric)\n
    ROCAUC: 96.52%\n
    "},{"location":"examples/quantile-regression-uncertainty/","title":"Handling uncertainty with quantile regression","text":"
    %matplotlib inline\n

    Quantile regression is useful when you're not so much interested in the accuracy of your model, but rather you want your model to be good at ranking observations correctly. The typical way to perform quantile regression is to use a special loss function, namely the quantile loss. The quantile loss takes a parameter, \\(\\alpha\\) (alpha), which indicates which quantile the model should be targeting. In the case of \\(\\alpha = 0.5\\), this is equivalent to asking the model to predict the median value of the target, rather than the mean.
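    To make this concrete, here is a minimal sketch of the quantile loss (also known as the pinball loss) for a single observation. Under-predictions are weighted by \\(\\alpha\\) and over-predictions by \\(1 - \\alpha\\), so minimizing it steers the model towards the \\(\\alpha\\)-th quantile.

    def quantile_loss(y_true, y_pred, alpha):
        diff = y_true - y_pred
        # An under-prediction (diff > 0) costs alpha per unit of error,
        # while an over-prediction costs (1 - alpha) per unit
        return alpha * diff if diff > 0 else (alpha - 1) * diff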

    A nice thing we can do with quantile regression is to produce a prediction interval for each prediction. Indeed, if we predict the lower and upper quantiles of the target then we will be able to obtain a \"trust region\" in between which the true value is likely to belong. Of course, the likeliness will depend on the chosen quantiles. For a slightly more detailed explanation see this blog post.

    As an example, let us take the simple nowcasting model we built in another notebook. Instead of predicting the mean value of the target distribution, we will predict the 5th, 50th, and 95th quantiles. This will require training three separate models, so we will encapsulate the model building logic in a function called make_model. We also have to slightly adapt the training loop, but not by much. Finally, we will draw the prediction interval along with the predictions for the 50th quantile (i.e. the median) and the true values.

    import calendar\nimport math\nimport matplotlib.pyplot as plt\nfrom river import compose\nfrom river import datasets\nfrom river import linear_model\nfrom river import metrics\nfrom river import optim\nfrom river import preprocessing\nfrom river import stats\n\n\ndef get_ordinal_date(x):\n    return {'ordinal_date': x['month'].toordinal()}    \n\n\ndef get_month_distances(x):\n    return {\n        calendar.month_name[month]: math.exp(-(x['month'].month - month) ** 2)\n        for month in range(1, 13)\n    }\n\n\ndef make_model(alpha):\n\n    extract_features = compose.TransformerUnion(get_ordinal_date, get_month_distances)\n\n    scale = preprocessing.StandardScaler()\n\n    learn = linear_model.LinearRegression(\n        intercept_lr=0,\n        optimizer=optim.SGD(0.03),\n        loss=optim.losses.Quantile(alpha=alpha)\n    )\n\n    model = extract_features | scale | learn\n    model = preprocessing.TargetStandardScaler(regressor=model)\n\n    return model\n\nmetric = metrics.MAE()\n\nmodels = {\n    'lower': make_model(alpha=0.05),\n    'center': make_model(alpha=0.5),\n    'upper': make_model(alpha=0.95)\n}\n\ndates = []\ny_trues = []\ny_preds = {\n    'lower': [],\n    'center': [],\n    'upper': []\n}\n\nfor x, y in datasets.AirlinePassengers():\n    y_trues.append(y)\n    dates.append(x['month'])\n\n    for name, model in models.items():\n        y_preds[name].append(model.predict_one(x))\n        model.learn_one(x, y)\n\n    # Update the error metric\n    metric.update(y, y_preds['center'][-1])\n\n# Plot the results\nfig, ax = plt.subplots(figsize=(10, 6))\nax.grid(alpha=0.75)\nax.plot(dates, y_trues, lw=3, color='#2ecc71', alpha=0.8, label='Truth')\nax.plot(dates, y_preds['center'], lw=3, color='#e74c3c', alpha=0.8, label='Prediction')\nax.fill_between(dates, y_preds['lower'], y_preds['upper'], color='#e74c3c', alpha=0.3, label='Prediction interval')\nax.legend()\nax.set_title(metric);\n

    An important thing to note is that the prediction interval we obtained should not be confused with a confidence interval. Simply put, a prediction interval represents uncertainty for where the true value lies, whereas a confidence interval encapsulates the uncertainty on the prediction. You can find out more by reading this CrossValidated post.

    "},{"location":"examples/sentence-classification/","title":"Sentence classification","text":"

    In this tutorial we will try to predict whether an SMS is spam or not. To train our model, we will use the SMSSpam dataset. This dataset is unbalanced: only 13.4% of the messages are spam. Let's look at the data:

    from river import datasets\n\ndatasets.SMSSpam()\n
    SMS Spam Collection dataset.\n\nThe data contains 5,574 items and 1 feature (i.e. SMS body). Spam messages represent\n13.4% of the dataset. The goal is to predict whether an SMS is a spam or not.\n\n      Name  SMSSpam                                                                              \n      Task  Binary classification                                                                \n   Samples  5,574                                                                                \n  Features  1                                                                                    \n    Sparse  False                                                                                \n      Path  /Users/max/river_data/SMSSpam/SMSSpamCollection                                      \n       URL  https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip\n      Size  466.71 KB                                                                            \nDownloaded  True\n
    from pprint import pprint\n\nX_y = datasets.SMSSpam()\n\nfor x, y in X_y:\n    pprint(x)\n    print(f'Spam: {y}')\n    break\n
    {'body': 'Go until jurong point, crazy.. Available only in bugis n great world '\n         'la e buffet... Cine there got amore wat...\\n'}\nSpam: False\n

    Let's start by building a simple model like a Naive Bayes classifier. We will first process the sentences with a TF-IDF transform so that our model can consume them. Then, we will measure the performance of our model with the ROC AUC metric, which is an appropriate metric to use when the classes are not balanced. In addition, Naive Bayes models can perform very well on unbalanced datasets and can be used for both binary and multi-class classification problems.

    from river import feature_extraction\nfrom river import naive_bayes\nfrom river import metrics\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    naive_bayes.BernoulliNB(alpha=0)\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    if y_pred is not None:\n        metric.update(y_pred=y_pred, y_true=y)\n        cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 93.00%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,809     17  \nTrue   102    645\n

    The results are quite good with this first model.
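
    To make these numbers concrete, we can read precision and recall for the spam class straight off the matrix (assuming, as above, that rows are true labels and columns are predictions):

    recall = 645 / (645 + 102)    # about 0.86: the share of actual spam that was caught\nprecision = 645 / (645 + 17)  # about 0.97: the share of predicted spam that really was spam\n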

    Since we are working with an imbalanced dataset, we can use the imblearn module to rebalance the classes. For more information about the imblearn module, you can find a dedicated tutorial here.

    from river import imblearn\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    imblearn.RandomUnderSampler(\n        classifier=naive_bayes.BernoulliNB(alpha=0),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    if y_pred is not None:\n        metric.update(y_pred=y_pred, y_true=y)\n        cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 94.61%\n

    The imblearn module improved our results. Not bad! We can visualize the pipeline to understand how the data is processed.

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,570    255  \nTrue    41    706\n
    model\n
    TFIDF ( normalize=True on=\"body\" strip_accents=True lowercase=True preprocessor=None tokenizer=None ngram_range=(1, 1) )
    RandomUnderSampler ( classifier=BernoulliNB ( alpha=0 true_threshold=0. ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    Now let's try to use logistic regression to classify messages. We will use a few tricks to make our model perform better. As in the previous example, we rebalance the classes of our dataset. The logistic regression will be fed with TF-IDF features.

    from river import linear_model\nfrom river import optim\nfrom river import preprocessing\n\nX_y = datasets.SMSSpam()\n\nmodel = (\n    feature_extraction.TFIDF(on='body') | \n    preprocessing.Normalizer() | \n    imblearn.RandomUnderSampler(\n        classifier=linear_model.LogisticRegression(\n            optimizer=optim.SGD(.9), \n            loss=optim.losses.Log()\n        ),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    metric.update(y_pred=y_pred, y_true=y)\n    cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 93.80%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,584    243  \nTrue    55    692\n
    model\n
    TFIDF ( normalize=True on=\"body\" strip_accents=True lowercase=True preprocessor=None tokenizer=None ngram_range=(1, 1) )
    Normalizer ( order=2 )
    RandomUnderSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.9 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    The results of the logistic regression are quite good but still inferior to the naive Bayes model.

    Let's try to use word embeddings to improve our logistic regression. Word embeddings allow you to represent a word as a vector, and they are trained so that semantically related words get similar vectors. For instance, the vector which represents the word python should be close to the vector which represents the word programming. We will use spaCy to convert our sentences to vectors. spaCy converts a sentence to a vector by averaging the embeddings of the words in the sentence.

    You can download pre-trained embeddings in many languages. We will use English pre-trained embeddings as our SMS are in English.

    The command below allows you to download the pre-trained embeddings that spaCy makes available. More information about spaCy and its installation may be found here.

    python -m spacy download en_core_web_sm\n

    Here, we create a custom transformer to convert an input sentence to a dict of floats. We will integrate this transformer into our pipeline.

    import spacy\n\nfrom river.base import Transformer\n\nclass Embeddings(Transformer):\n    \"\"\"My custom transformer, word embedding using spaCy.\"\"\"\n\n    def __init__(self, on: str):\n        self.on = on\n        self.embeddings = spacy.load('en_core_web_sm')\n\n    def transform_one(self, x, y=None):\n        return {dimension: xi for dimension, xi in enumerate(self.embeddings(x[self.on]).vector)}\n

    Let's train our logistic regression:

    X_y = datasets.SMSSpam()\n\nmodel = (\n    Embeddings(on='body') | \n    preprocessing.Normalizer() |\n    imblearn.RandomOverSampler(\n        classifier=linear_model.LogisticRegression(\n            optimizer=optim.SGD(.5), \n            loss=optim.losses.Log()\n        ),\n        desired_dist={0: .5, 1: .5},\n        seed=42\n    )\n)\n\nmetric = metrics.ROCAUC()\ncm = metrics.ConfusionMatrix()\n\nfor x, y in X_y:\n\n    y_pred = model.predict_one(x)\n\n    metric.update(y_pred=y_pred, y_true=y)\n    cm.update(y_pred=y_pred, y_true=y)\n\n    model.learn_one(x, y)\n\nmetric\n
    ROCAUC: 91.31%\n

    The confusion matrix:

    cm\n
         False   True  \nFalse 4,537    290  \nTrue    85    662\n
    model\n
    Embeddings ( on=\"body\" )
    Normalizer ( order=2 )
    RandomOverSampler ( classifier=LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.5 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () ) desired_dist={0: 0.5, 1: 0.5} seed=42 )

    The results of the logistic regression using spaCy embeddings are lower than those obtained with TF-IDF features. We could surely improve the results by cleaning up the text. We could also use embeddings better suited to our dataset. However, on this problem, the logistic regression is not better than the Naive Bayes model. No free lunch today.

    "},{"location":"examples/the-art-of-using-pipelines/","title":"The art of using pipelines","text":"

    Pipelines are a natural way to think about a machine learning system. Indeed, with some practice a data scientist can visualise data \"flowing\" through a series of steps. The input is typically some raw data which has to be processed in some manner. The goal is to represent the data in such a way that it can be ingested by a machine learning algorithm. Along the way, some steps will extract features, while others will normalize the data and remove undesirable elements. Pipelines are simple, and yet they are a powerful way of designing sophisticated machine learning systems.

    Both scikit-learn and pandas make it possible to use pipelines. However, it's quite rare to see pipelines being used in practice (at least on Kaggle). Sometimes you get to see people using scikit-learn's pipeline module, but the pipe method from pandas is sadly underappreciated. A big reason why pipelines are not given much love is that it's easier to think of batch learning in terms of a script or a notebook. Indeed, many people doing data science seem to prefer a procedural style to a declarative style. Moreover, in practice pipelines can be a bit rigid if one wishes to do unorthodox operations.

    Although pipelines may be a bit of an odd fit for batch learning, they make complete sense when they are used for online learning. Indeed the UNIX philosophy has advocated the use of pipelines for data processing for many decades. If you can visualise data as a stream of observations then using pipelines should make a lot of sense to you. We'll attempt to convince you by writing a machine learning algorithm in a procedural way and then converting it to a declarative pipeline in small steps. Hopefully by the end you'll be convinced, or not!

    In this notebook we'll manipulate data from the Kaggle Recruit Restaurants Visitor Forecasting competition. The data is directly available through River's datasets module.

    from pprint import pprint\nfrom river import datasets\n\nfor x, y in datasets.Restaurants():\n    pprint(x)\n    pprint(y)\n    break\n
    Downloading https://maxhalford.github.io/files/datasets/kaggle_recruit_restaurants.zip (4.28 MB)\nUncompressing into /Users/max/river_data/Restaurants\n{'area_name': 'T\u014dky\u014d-to Nerima-ku Toyotamakita',\n 'date': datetime.datetime(2016, 1, 1, 0, 0),\n 'genre_name': 'Izakaya',\n 'is_holiday': True,\n 'latitude': 35.7356234,\n 'longitude': 139.6516577,\n 'store_id': 'air_04341b588bde96cd'}\n10\n

    We'll start by building and running a model using a procedural coding style. The performance of the model doesn't matter, we're simply interested in the design of the model.

    from river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\nfrom river import preprocessing\nfrom river import stats\nfrom river import utils\n\nmeans = (\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)),\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)),\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n)\n\nscaler = preprocessing.StandardScaler()\nlin_reg = linear_model.LinearRegression()\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n\n    # Derive date features\n    x['weekday'] = x['date'].weekday()\n    x['is_weekend'] = x['date'].weekday() in (5, 6)\n\n    # Process the rolling means of the target  \n    for mean in means:\n        x = {**x, **mean.transform_one(x)}\n        mean.learn_one(x, y)\n\n    # Remove the key/value pairs that aren't features\n    for key in ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']:\n        x.pop(key)\n\n    # Rescale the data\n    x = scaler.learn_one(x).transform_one(x)\n\n    # Fit the linear regression\n    y_pred = lin_reg.predict_one(x)\n    lin_reg.learn_one(x, y)\n\n    # Update the metric using the out-of-fold prediction\n    metric.update(y, y_pred)\n\nprint(metric)\n
    MAE: 8.316538\n

    We're not using many features. We can print the last x to get an idea of the features (don't forget they've been scaled!).

    pprint(x)\n
    {'is_holiday': -0.23103573677646685,\n 'is_weekend': 1.6249280076334165,\n 'weekday': 1.0292832579142892,\n 'y_mean_by_store_id': -1.3980979075298516}\n

    The above chunk of code is quite explicit, but it's a bit verbose. The whole point of libraries such as River is to make life easier for users. Moreover, there's too much room for users to mess up the order in which things are done, which increases the chance of target leakage. We'll now rewrite our model in a declarative fashion using a pipeline \u00e0 la sklearn.

    from river import compose\n\n\ndef get_date_features(x):\n    weekday =  x['date'].weekday()\n    return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}\n\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('date_features', compose.FuncTransformer(get_date_features)),\n        ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7))),\n        ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14))),\n        ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)))\n    )),\n    ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n\nmetric = metrics.MAE()\n\nfor x, y in datasets.Restaurants():\n\n    # Make a prediction without using the target\n    y_pred = model.predict_one(x)\n\n    # Update the model using the target\n    model.learn_one(x, y)\n\n    # Update the metric using the out-of-fold prediction\n    metric.update(y, y_pred)\n\nprint(metric)\n
    MAE: 8.413859\n

    We use a Pipeline to arrange each step in a sequential order. A TransformerUnion is used to merge multiple feature extractors into a single transformer. The for loop is now much shorter and is thus easier to grok: we get the out-of-fold prediction, we fit the model, and finally we update the metric. This way of evaluating a model is typical of online learning, so it is wrapped inside a function called progressive_val_score, which is part of the evaluate module. We can use it to replace the for loop.

    from river import evaluate\n\nmodel = compose.Pipeline(\n    ('features', compose.TransformerUnion(\n        ('date_features', compose.FuncTransformer(get_date_features)),\n        ('last_7_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7))),\n        ('last_14_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14))),\n        ('last_21_mean', feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)))\n    )),\n    ('drop_non_features', compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude')),\n    ('scale', preprocessing.StandardScaler()),\n    ('lin_reg', linear_model.LinearRegression())\n)\n\nevaluate.progressive_val_score(dataset=datasets.Restaurants(), model=model, metric=metrics.MAE())\n
    MAE: 8.413859\n

    Notice that you couldn't have used the progressive_val_score function if you had written the model in a procedural manner.

    Our code is getting shorter, but it's still a bit difficult on the eyes. Indeed, there is a lot of boilerplate code associated with pipelines that can get tedious to write. However, River has some special tricks up its sleeve to save you from a lot of pain.

    The first trick is that the name of each step in the pipeline can be omitted. If no name is given for a step then River automatically infers one.

    model = compose.Pipeline(\n    compose.TransformerUnion(\n        compose.FuncTransformer(get_date_features),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)),\n        feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n    ),\n    compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Under the hood, a Pipeline inherits from collections.OrderedDict. Indeed, this makes sense, because if you think about it a Pipeline is simply a sequence of steps where each step has a name. The reason we mention this is that it means you can manipulate a Pipeline the same way you would manipulate an ordinary dict. For instance, we can print the name of each step by iterating over the keys of model.steps.

    for name in model.steps:\n    print(name)\n
    TransformerUnion\nDiscard\nStandardScaler\nLinearRegression\n

    The first step is a TransformerUnion and its string representation contains the string representation of each of its elements. Not having to write names saves some time and space and is certainly less tedious.
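
    Since the steps live in an ordered dict, you can also fetch a single step by its inferred name. A small sketch, assuming the default names shown above:

    lin_reg = model.steps['LinearRegression']\nprint(lin_reg)\n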

    The next trick is that we can use mathematical operators to compose our pipeline. For example we can use the + operator to merge Transformers into a TransformerUnion.

    model = compose.Pipeline(\n    compose.FuncTransformer(get_date_features) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)) + \\\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21)),\n\n    compose.Discard('store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude'),\n    preprocessing.StandardScaler(),\n    linear_model.LinearRegression()\n)\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Likewise, we can use the | operator to assemble steps into a Pipeline.

    model = (\n    compose.FuncTransformer(get_date_features) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 7)) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 14)) +\n    feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), 21))\n)\n\nto_discard = ['store_id', 'date', 'genre_name', 'area_name', 'latitude', 'longitude']\n\nmodel = model | compose.Discard(*to_discard) | preprocessing.StandardScaler()\n\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Hopefully you'll agree that this is a powerful way to express machine learning pipelines. For some people this should be quite reminiscent of the UNIX pipe operator. One final trick we want to mention is that functions are automatically wrapped with a FuncTransformer, which can be quite handy.

    model = get_date_features\n\nfor n in [7, 14, 21]:\n    model += feature_extraction.TargetAgg(by='store_id', how=utils.Rolling(stats.Mean(), n))\n\nmodel |= compose.Discard(*to_discard)\nmodel |= preprocessing.StandardScaler()\nmodel |= linear_model.LinearRegression()\n\nevaluate.progressive_val_score(datasets.Restaurants(), model, metrics.MAE())\n
    MAE: 8.413859\n

    Naturally some may prefer the procedural style we first used because they find it easier to work with. It all depends on your style and you should use what you feel comfortable with. However we encourage you to use operators because we believe that this will increase the readability of your code, which is very important. To each their own!

    Before finishing we can take an interactive look at our pipeline.

    model\n
    def get_date_features(x): weekday = x['date'].weekday() return {'weekday': weekday, 'is_weekend': weekday in (5, 6)}
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=7 ) target_name=\"y\" )
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=14 ) target_name=\"y\" )
    TargetAgg ( by=['store_id'] how=Rolling ( obj=Mean () window_size=21 ) target_name=\"y\" )
    Discard ( area_name date genre_name latitude longitude store_id )
    StandardScaler ( with_std=True )
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/","title":"Part 1","text":"

    Table of contents of this tutorial series on matrix factorization for recommender systems:

    • Part 1 - Traditional Matrix Factorization methods for Recommender Systems
    • Part 2 - Factorization Machines and Field-aware Factorization Machines
    • Part 3 - Large scale learning and better predictive power with multiple pass learning
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#introduction","title":"Introduction","text":"

    A recommender system is a software tool designed to generate and suggest relevant items to users. Popular large scale examples include:

    • Amazon (suggesting products)
    • Facebook (suggesting posts in users' news feeds)
    • Spotify (suggesting music)

    Social recommendation from graphs (mostly used by social networks) is not covered in River. We focus on the general case of item recommendation. This problem can be represented with the user-item matrix:

    \\[ \\normalsize \\begin{matrix} & \\begin{matrix} _1 & _\\cdots & _\\cdots & _\\cdots & _I \\end{matrix} \\\\ \\begin{matrix} _1 \\\\ _\\vdots \\\\ _\\vdots \\\\ _\\vdots \\\\ _U \\end{matrix} & \\begin{bmatrix} {\\color{Red} ?} & 2 & \\cdots & {\\color{Red} ?} & {\\color{Red} ?} \\\\ {\\color{Red} ?} & {\\color{Red} ?} & \\cdots & {\\color{Red} ?} & 4.5 \\\\ \\vdots & \\ddots & \\ddots & \\ddots & \\vdots \\\\ 3 & {\\color{Red} ?} & \\cdots & {\\color{Red} ?} & {\\color{Red} ?} \\\\ {\\color{Red} ?} & {\\color{Red} ?} & \\cdots & 5 & {\\color{Red} ?} \\end{bmatrix} \\end{matrix} \\]

    Where \\(U\\) and \\(I\\) are the number of users and items in the system, respectively. A matrix entry represents a user's preference for an item; it can be a rating, a like or dislike, etc. Because of the huge number of users and items compared to the number of observed entries, those matrices are very sparse (usually less than 1% filled).

    Matrix Factorization (MF) is a class of collaborative filtering algorithms derived from Singular Value Decomposition (SVD). MF's strength lies in its ability to model interactions between high cardinality categorical variables. This subfield boomed during the famous Netflix Prize contest in 2006, when numerous novel variants were invented and became popular thanks to their attractive accuracy and scalability.

    The MF approach seeks to fill the user-item matrix by treating the problem as one of matrix completion. Its core idea is to assume a latent model which learns its own representation of the users and the items in a lower-dimensional latent space by factorizing the observed parts of the matrix.

    A factorized user or item is represented as a vector \\(\\mathbf{v}_u\\) or \\(\\mathbf{v}_i\\) composed of \\(k\\) latent factors, with \\(k \\ll U, I\\). Those learnt latent variables represent, for an item, the various aspects describing it, and, for a user, its interests in terms of those aspects. The model then assumes that a user's fondness for an item is a sum of preferences about the item's various aspects. This sum is the dot product between the latent vectors of a given user-item pair:

    \\[ \\normalsize \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{u, f} \\cdot \\mathbf{v}_{i, f} \\]
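
    As a toy illustration with made-up numbers, \\(k = 3\\) latent factors might give:

    v_user = [0.2, -0.1, 0.7]\nv_item = [0.5, 0.3, 0.4]\n\n# the predicted affinity is the dot product of the two latent vectors\naffinity = sum(u * i for u, i in zip(v_user, v_item))  # 0.1 - 0.03 + 0.28 = 0.35\n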

    MF model weights are learnt in an online fashion, often with stochastic gradient descent, as it provides relatively fast running time and good accuracy. There is a great and widely popular library named surprise that implements MF models (and others), but in contrast with River it doesn't follow a pure online philosophy (all the data has to be loaded in memory and the API doesn't let you update your model with new data).

    Notes:

    • In recent years, deep learning techniques have been proposed for recommendation tasks and claim state of the art results. However, recent work (August 2019) showed that those promises can't be taken for granted and that traditional MF methods are still relevant today.
    • For more information about how the business value of recommender systems is measured and why they are one of the main success stories of machine learning, see the following literature survey (December 2019).
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#lets-start","title":"Let's start","text":"

    In this tutorial, we are going to explore the MF algorithms available in River and test them on a movie recommendation problem with the MovieLens 100K dataset. The latter is a collection of movie ratings (from 1 to 5) that includes various information about both the items and the users. We can access it from the river.datasets module:

    import json\n\nfrom river import datasets\n\nfor x, y in datasets.MovieLens100K():\n    print(f'x = {json.dumps(x, indent=4)}')\n    print(f'y = {y}')\n    break\n
    Downloading https://maxhalford.github.io/files/datasets/ml_100k.zip (1.83 MB)\nUncompressing into /Users/max/river_data/MovieLens100K\nx = {\n    \"user\": \"259\",\n    \"item\": \"255\",\n    \"timestamp\": 874731910000000000,\n    \"title\": \"My Best Friend's Wedding (1997)\",\n    \"release_date\": 866764800000000000,\n    \"genres\": \"comedy, romance\",\n    \"age\": 21.0,\n    \"gender\": \"M\",\n    \"occupation\": \"student\",\n    \"zip_code\": \"48823\"\n}\ny = 4.0\n

    Let's define a routine to evaluate our different models on MovieLens 100K. Mean Absolute Error and Root Mean Squared Error will be our metrics, printed alongside the model's computation time and memory usage:

    from river import metrics\nfrom river.evaluate import progressive_val_score\n\ndef evaluate(model, unpack_user_and_item=True):\n    X_y = datasets.MovieLens100K(unpack_user_and_item)\n    metric = metrics.MAE() + metrics.RMSE()\n    _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#naive-prediction","title":"Naive prediction","text":"

    It's good practice in machine learning to start with a naive baseline and then iterate from simple things to complex ones, observing progress incrementally. Let's start by predicting the running mean of the target as a first shot:

    from river import dummy\nfrom river import stats\n\nmodel = dummy.StatisticRegressor(stats.Mean())\nevaluate(model, unpack_user_and_item=False)\n
    [25,000] MAE: 0.934259, RMSE: 1.124469 \u2013 00:00:00 \u2013 514 B\n[50,000] MAE: 0.923893, RMSE: 1.105 \u2013 00:00:00 \u2013 514 B\n[75,000] MAE: 0.937359, RMSE: 1.123696 \u2013 00:00:01 \u2013 514 B\n[100,000] MAE: 0.942162, RMSE: 1.125783 \u2013 00:00:01 \u2013 514 B\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#baseline-model","title":"Baseline model","text":"

    Now we can do machine learning and explore the models available in the river.reco module, starting with the baseline model. It extends our naive prediction by adding to the global running mean two bias terms characterizing the user's and the item's discrepancy from the general tendency. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i} \\]

    This baseline model can be viewed as a linear regression where the intercept is replaced by the target running mean with the users and the items one hot encoded.
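
    With made-up numbers: if the global mean rating \\(\\bar{y}\\) is 3.5, user \\(u\\) tends to rate 0.3 above average and item \\(i\\) tends to be rated 0.2 below average, then the baseline predicts:

    y_bar, bu, bi = 3.5, 0.3, -0.2\ny_pred = y_bar + bu + bi  # 3.5 + 0.3 - 0.2 = 3.6\n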

    All machine learning models in River expect dicts as input, with feature names as keys and feature values as values. Specifically, models from river.reco expect 'user' and 'item' entries without any type constraint on their values (i.e. they can be strings or numbers), e.g.:

    x = {\n    'user': 'Guido',\n    'item': \"Monty Python's Flying Circus\"\n}\n

    Other entries, if they exist, are simply ignored. This is quite useful as we don't need to spend time and storage doing one hot encoding.

    from river import preprocessing\nfrom river import optim\nfrom river import reco\n\nbaseline_params = {\n    'optimizer': optim.SGD(0.025),\n    'l2': 0.,\n    'initializer': optim.initializers.Zeros()\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.Baseline(**baseline_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761844, RMSE: 0.960972 \u2013 00:00:00 \u2013 173.6 KB\n[50,000] MAE: 0.753292, RMSE: 0.951223 \u2013 00:00:00 \u2013 242.23 KB\n[75,000] MAE: 0.754177, RMSE: 0.953376 \u2013 00:00:01 \u2013 286.04 KB\n[100,000] MAE: 0.754651, RMSE: 0.954148 \u2013 00:00:01 \u2013 309.64 KB\n

    We gained two tenths of a point of MAE compared to our naive prediction (0.7546 vs 0.9421), meaning that the model has learnt a significant amount of information.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#funk-matrix-factorization-funkmf","title":"Funk Matrix Factorization (FunkMF)","text":"

    It's the purest form of matrix factorization, consisting only of learning the users' and items' latent representations, as discussed in the introduction. Simon Funk popularized its stochastic gradient descent optimization in 2006 during the Netflix Prize. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle \\]

    Note: FunkMF is sometimes referred to as Probabilistic Matrix Factorization, although the latter is in fact an extended, probabilistic version of it.

    funk_mf_params = {\n    'n_factors': 10,\n    'optimizer': optim.SGD(0.05),\n    'l2': 0.1,\n    'initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73)\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.FunkMF(**funk_mf_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 1.070136, RMSE: 1.397014 \u2013 00:00:00 \u2013 570.35 KB\n[50,000] MAE: 0.99174, RMSE: 1.290666 \u2013 00:00:01 \u2013 716 KB\n[75,000] MAE: 0.961072, RMSE: 1.250842 \u2013 00:00:01 \u2013 844.09 KB\n[100,000] MAE: 0.944883, RMSE: 1.227688 \u2013 00:00:02 \u2013 945.19 KB\n

    Results are equivalent to our naive prediction (0.9448 vs 0.9421). By only focusing on users' preferences and items' characteristics, the model is limited in its ability to capture different views of the problem. Despite its poor performance alone, this algorithm is quite useful when combined with other models or when we need to build dense representations for other tasks.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-1/#biased-matrix-factorization-biasedmf","title":"Biased Matrix Factorization (BiasedMF)","text":"

    It's the combination of the Baseline model and FunkMF. The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = \\bar{y} + bu_{u} + bi_{i} + \\langle \\mathbf{v}_u, \\mathbf{v}_i \\rangle \\]

    Note: the name Biased Matrix Factorization is used by some people, while others refer to the same model as SVD or Funk SVD. That's the case of Yehuda Koren and Robert Bell in Recommender Systems Handbook (Chapter 5, Advances in Collaborative Filtering) and of the surprise library. Nevertheless, SVD could be confused with the original Singular Value Decomposition from which it's derived, and Funk SVD could also be misleading because the bias part of the model equation doesn't come from Simon Funk's work. For those reasons, we chose to side with Biased Matrix Factorization, which describes the model more naturally.

    biased_mf_params = {\n    'n_factors': 10,\n    'bias_optimizer': optim.SGD(0.025),\n    'latent_optimizer': optim.SGD(0.05),\n    'weight_initializer': optim.initializers.Zeros(),\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n    'l2_bias': 0.,\n    'l2_latent': 0.\n}\n\nmodel = preprocessing.PredClipper(\n    regressor=reco.BiasedMF(**biased_mf_params),\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761818, RMSE: 0.961057 \u2013 00:00:00 \u2013 669.27 KB\n[50,000] MAE: 0.751667, RMSE: 0.949443 \u2013 00:00:01 \u2013 869.85 KB\n[75,000] MAE: 0.749653, RMSE: 0.948723 \u2013 00:00:02 \u2013 1 MB\n[100,000] MAE: 0.748559, RMSE: 0.947854 \u2013 00:00:02 \u2013 1.11 MB\n

    Results improved (0.7485 vs 0.7546), demonstrating that the user and item latent representations bring additional information.

    To conclude this first tutorial about factorization models, let's review the important parameters to tune when dealing with this family of methods:

    • n_factors: the number of latent factors. The more you set, the more item aspects and user preferences you are going to learn. Too many will cause overfitting; l2 regularization can help.
    • *_optimizer: the optimizers. Classic stochastic gradient descent performs well, and finding a good learning rate will make the difference.
    • initializer: the latent weights initialization. Latent vectors have to be initialized with non-constant values. We generally sample them from a zero-mean normal distribution with a small standard deviation.
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/","title":"Part 2","text":"

    As seen in Part 1, the strength of Matrix Factorization (MF) lies in its ability to deal with sparse and high cardinality categorical variables. In this second tutorial we will have a look at the Factorization Machines (FM) algorithm and study how it generalizes the power of MF.

    Table of contents of this tutorial series on matrix factorization for recommender systems:

    • Part 1 - Traditional Matrix Factorization methods for Recommender Systems
    • Part 2 - Factorization Machines and Field-aware Factorization Machines
    • Part 3 - Large scale learning and better predictive power with multiple pass learning
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#factorization-machines","title":"Factorization Machines","text":"

    Steffen Rendle came up with Factorization Machines in 2010, an algorithm able to handle any real valued feature vector, combining the advantages of general predictors with factorization models. It became quite popular in the field of online advertising, notably after winning several Kaggle competitions. The modeling technique starts with a linear regression to capture the effects of each variable individually:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} \\]

    Interaction terms are then added to learn relations between features. Instead of learning a single and specific weight per interaction (as in polynomial regression), a set of latent factors is learnt per feature (as in MF). An interaction is calculated by multiplying the product of the involved features with the dot product of their latent vectors. The degree of factorization \u2014 or model order \u2014 represents the maximum number of features per interaction considered. The model equation for a factorization machine of degree \\(d = 2\\) is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'} \\]

    Where \\(\\normalsize \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle\\) is the dot product of \\(j\\) and \\(j'\\) latent vectors:

    \\[ \\normalsize \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle = \\sum_{f=1}^{k} \\mathbf{v}_{j, f} \\cdot \\mathbf{v}_{j', f} \\]
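
    For illustration only, here is a naive sketch of the pairwise term of the degree-2 model above, computed in \\(O(p^2 k)\\) time; x is a feature dict and V is a hypothetical table mapping each feature to its \\(k\\) latent weights:

    def fm_pairwise(x, V):\n    # x: dict mapping feature name -> value\n    # V: dict mapping feature name -> list of k latent weights (hypothetical)\n    feats = list(x)\n    total = 0.\n    for a in range(len(feats)):\n        for b in range(a + 1, len(feats)):\n            j, jp = feats[a], feats[b]\n            # dot product of the two latent vectors, weighted by the feature values\n            dot = sum(vj * vjp for vj, vjp in zip(V[j], V[jp]))\n            total += dot * x[j] * x[jp]\n    return total\n

    Rendle showed that this double sum can be rearranged so that it runs in \\(O(p k)\\) time, which is part of what makes FM practical.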

    Higher-order FM will be covered in a following section. Just note that factorization models express their power in sparse settings, which is also where higher-order interactions are hard to estimate.

    Strong emphasis must be placed on feature engineering, as it allows FM to mimic most factorization models and significantly impacts its performance. One hot encoding high cardinality categorical variables is the most frequent preprocessing step. For more efficiency, River's FM implementation considers string values to be categorical variables and automatically one hot encodes them. FM models have their own module, river.facto.

    ## Mimic Biased Matrix Factorization (BiasedMF)

    Let's start with a simple example where we want to reproduce the Biased Matrix Factorization model we trained in the previous tutorial. For a fair comparison with Part 1 example, let's set the same evaluation framework:

    from river import datasets\nfrom river import metrics\nfrom river.evaluate import progressive_val_score\n\ndef evaluate(model):\n    X_y = datasets.MovieLens100K()\n    metric = metrics.MAE() + metrics.RMSE()\n    _ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)\n

    In order to build an equivalent model we need to use the same hyper-parameters. As we can't replace the FM intercept with the global running mean, we won't be able to build the exact same model:

    from river import compose\nfrom river import facto\nfrom river import preprocessing\nfrom river import optim\nfrom river import stats\n\nfm_params = {\n    'n_factors': 10,\n    'weight_optimizer': optim.SGD(0.025),\n    'latent_optimizer': optim.SGD(0.05),\n    'sample_normalization': False,\n    'l1_weight': 0.,\n    'l2_weight': 0.,\n    'l1_latent': 0.,\n    'l2_latent': 0.,\n    'intercept': 3,\n    'intercept_lr': .01,\n    'weight_initializer': optim.initializers.Zeros(),\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761761, RMSE: 0.960662 \u2013 00:00:01 \u2013 818.86 KB\n[50,000] MAE: 0.751922, RMSE: 0.949783 \u2013 00:00:03 \u2013 948.77 KB\n[75,000] MAE: 0.749822, RMSE: 0.948634 \u2013 00:00:04 \u2013 1.07 MB\n[100,000] MAE: 0.748393, RMSE: 0.94776 \u2013 00:00:06 \u2013 1.19 MB\n

    Both MAEs are very close to each other (0.7486 vs 0.7485), showing that we almost reproduced the [reco.BiasedMF](../../../api/reco/BiasedMF) algorithm. The cost is a naturally slower running time, as the FM implementation offers more flexibility.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#feature-engineering-for-fm-models","title":"Feature engineering for FM models","text":"

    Let's study the basics of how to properly encode data for FM models. We are going to keep using MovieLens 100K as it provides various feature types:

    import json\n\nfor x, y in datasets.MovieLens100K():\n    print(f'x = {json.dumps(x, indent=4)}\\ny = {y}')\n    break\n
    x = {\n    \"user\": \"259\",\n    \"item\": \"255\",\n    \"timestamp\": 874731910000000000,\n    \"title\": \"My Best Friend's Wedding (1997)\",\n    \"release_date\": 866764800000000000,\n    \"genres\": \"comedy, romance\",\n    \"age\": 21.0,\n    \"gender\": \"M\",\n    \"occupation\": \"student\",\n    \"zip_code\": \"48823\"\n}\ny = 4.0\n

    The features we are going to add to our model don't improve its predictive power. Nevertheless, they are useful to illustrate different methods of data encoding:

    1. Set-categorical variables

    We have seen that categorical variables are one hot encoded automatically if set to strings. On the other hand, set-categorical variables must be encoded explicitly by the user. A good way of doing so is to assign them a value of \\(1/m\\), where \\(m\\) is the number of elements of the sample set. It gives the feature a constant \"weight\" across all samples, preserving the model's stability. Let's create a routine to encode movie genres this way:

    def split_genres(x):\n    genres = x['genres'].split(', ')\n    return {f'genre_{genre}': 1 / len(genres) for genre in genres}\n
    2. Numerical variables

    In practice, transforming numerical features into categorical ones works better in most cases. Feature binning is the natural way to do this, but finding good bins is sometimes more an art than a science. Let's encode users' ages with something simple:

    def bin_age(x):\n    if x['age'] <= 18:\n        return {'age_0-18': 1}\n    elif x['age'] <= 32:\n        return {'age_19-32': 1}\n    elif x['age'] < 55:\n        return {'age_33-54': 1}\n    else:\n        return {'age_55-100': 1}\n
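
    As a quick sanity check, applying these two encoders to the sample shown earlier (genres 'comedy, romance', age 21.0) gives:

    x = {'genres': 'comedy, romance', 'age': 21.0}\n\nprint(split_genres(x))  # {'genre_comedy': 0.5, 'genre_romance': 0.5}\nprint(bin_age(x))       # {'age_19-32': 1}\n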

    Let's put everything together:

    fm_params = {\n    'n_factors': 14,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FMRegressor(**fm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.760059, RMSE: 0.961415 \u2013 00:00:04 \u2013 935.54 KB\n[50,000] MAE: 0.751429, RMSE: 0.951504 \u2013 00:00:08 \u2013 1.06 MB\n[75,000] MAE: 0.750568, RMSE: 0.951592 \u2013 00:00:13 \u2013 1.22 MB\n[100,000] MAE: 0.75018, RMSE: 0.951622 \u2013 00:00:17 \u2013 1.37 MB\n

    Note that using more variables involves factorizing a larger latent space, so increasing the number of latent factors \\(k\\) often helps capture more information.

    Some other feature engineering tips from 3 idiots' winning solution for Kaggle Criteo display ads competition in 2014:

    • Infrequent modalities often bring noise and little information; transforming them into a special tag can help
    • In some cases, sample-wise normalization seems to make the optimization problem easier to solve
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#higher-order-factorization-machines-hofm","title":"Higher-Order Factorization Machines (HOFM)","text":"

    The model equation generalized to any order \\(d \\geq 2\\) is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{l=2}^{d} \\sum_{j_1=1}^{p} \\cdots \\sum_{j_l=j_{l-1}+1}^{p} \\left(\\prod_{j'=1}^{l} x_{j_{j'}} \\right) \\left(\\sum_{f=1}^{k_l} \\prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \\right) \\]
    hofm_params = {\n    'degree': 3,\n    'n_factors': 12,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.HOFMRegressor(**hofm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761379, RMSE: 0.96214 \u2013 00:00:16 \u2013 1.73 MB\n[50,000] MAE: 0.751998, RMSE: 0.951589 \u2013 00:00:32 \u2013 2.03 MB\n[75,000] MAE: 0.750994, RMSE: 0.951616 \u2013 00:00:48 \u2013 2.36 MB\n[100,000] MAE: 0.750849, RMSE: 0.952142 \u2013 00:01:04 \u2013 2.66 MB\n

    As said previously, high-order interactions are often hard to estimate due to sparsity, which is why we won't spend more time on them here.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#field-aware-factorization-machines-ffm","title":"Field-aware Factorization Machines (FFM)","text":"

    The field-aware variant of FM (FFM) improves on the original method by adding the notion of \"fields\". A \"field\" is a group of features that belong to a specific domain (e.g. the \"users\" field, the \"items\" field, or the \"movie genres\" field).

    FFM restricts itself to pairwise interactions and factorizes separate latent spaces \u2014 one per combination of fields (e.g. users/items, users/movie genres, or items/movie genres) \u2014 instead of a common one shared by all fields. Therefore, each feature has one latent vector per field it can interact with, so that it can learn the specific effect with each different field.

    The model equation is defined by:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} \\langle \\mathbf{v}_{j, f_{j'}}, \\mathbf{v}_{j', f_{j}} \\rangle x_{j} x_{j'} \\]

    Where \\(f_j\\) and \\(f_{j'}\\) are the fields corresponding to \\(j\\) and \\(j'\\) features, respectively.

    ffm_params = {\n    'n_factors': 8,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FFMRegressor(**ffm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.758339, RMSE: 0.959047 \u2013 00:00:06 \u2013 2.16 MB\n[50,000] MAE: 0.749833, RMSE: 0.948531 \u2013 00:00:13 \u2013 2.54 MB\n[75,000] MAE: 0.749631, RMSE: 0.949418 \u2013 00:00:19 \u2013 2.96 MB\n[100,000] MAE: 0.749776, RMSE: 0.950131 \u2013 00:00:26 \u2013 3.35 MB\n

    Note that FFM usually needs to learn a smaller number of latent factors \\(k\\) than FM, as each latent vector only deals with one field.

    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-2/#field-weighted-factorization-machines-fwfm","title":"Field-weighted Factorization Machines (FwFM)","text":"

    Field-weighted Factorization Machines (FwFM) address FFM's memory issues caused by its large number of parameters, which is on the order of the number of features times the number of fields. Like FFM, FwFM is an extension of FM restricted to pairwise interactions, but instead of factorizing separate latent spaces, it learns a specific weight \\(r_{f_j, f_{j'}}\\) for each field combination, modelling the interaction strength.

    The model equation is defined as:

    \\[ \\normalsize \\hat{y}(x) = w_{0} + \\sum_{j=1}^{p} w_{j} x_{j} + \\sum_{j=1}^{p} \\sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \\langle \\mathbf{v}_j, \\mathbf{v}_{j'} \\rangle x_{j} x_{j'} \\]
    fwfm_params = {\n    'n_factors': 10,\n    'weight_optimizer': optim.SGD(0.01),\n    'latent_optimizer': optim.SGD(0.025),\n    'intercept': 3,\n    'seed': 73,\n}\n\nregressor = compose.Select('user', 'item')\nregressor += (\n    compose.Select('genres') |\n    compose.FuncTransformer(split_genres)\n)\nregressor += (\n    compose.Select('age') |\n    compose.FuncTransformer(bin_age)\n)\nregressor |= facto.FwFMRegressor(**fwfm_params)\n\nmodel = preprocessing.PredClipper(\n    regressor=regressor,\n    y_min=1,\n    y_max=5\n)\n\nevaluate(model)\n
    [25,000] MAE: 0.761435, RMSE: 0.962211 \u2013 00:00:08 \u2013 834.1 KB\n[50,000] MAE: 0.754063, RMSE: 0.953248 \u2013 00:00:17 \u2013 964.01 KB\n[75,000] MAE: 0.754729, RMSE: 0.95507 \u2013 00:00:25 \u2013 1.08 MB\n[100,000] MAE: 0.755697, RMSE: 0.956542 \u2013 00:00:34 \u2013 1.21 MB\n
    "},{"location":"examples/matrix-factorization-for-recommender-systems/part-3/","title":"Part 3","text":"

    To do.

    "},{"location":"faq/","title":"Frequently Asked Questions","text":""},{"location":"faq/#do-all-classifiers-support-multi-class-classification","title":"Do all classifiers support multi-class classification?","text":"

    No, they don't. Although binary classification can be seen as a special case of multi-class classification, there are many optimizations that can be performed if we know that there are only two classes. It would be annoying to have to check whether this is the case in an online setting. All in all we find that separating both cases leads to much cleaner code. Note that the multiclass module contains wrapper models that enable you to perform multi-class classification with binary classifiers.

    "},{"location":"faq/#how-do-i-know-if-a-classifier-supports-multi-class-classification","title":"How do I know if a classifier supports multi-class classification?","text":"

    Each classifier in River inherits from the base.Classifier class. Each classifier therefore has a _multiclass property which indicates whether or not it can process a non-boolean target value.

    >>> from river import linear_model\n\n>>> classifier = linear_model.LogisticRegression()\n>>> classifier._multiclass\nFalse\n
    "},{"location":"faq/#why-doesnt-river-do-any-input-validation","title":"Why doesn't river do any input validation?","text":"

    Python encourages a coding style called EAFP, which stands for \"Easier to Ask for Forgiveness than Permission\". The idea is to assume that runtime errors don't occur, and instead use try/excepts to catch errors. The great benefit is that we don't have to drown our code in if statements, which is symptomatic of the LBYL style, which stands for \"Look Before You Leap\". This makes our implementations much more readable than, say, scikit-learn's, which does a lot of input validation. The catch is that users have to be careful to use sane inputs. As always, there is no free lunch!
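
    As a generic illustration of the two styles (plain Python, not River code):

    features = {'height': 180.0}\n\n# EAFP: act first, catch the failure if it happens\ntry:\n    value = features['age']\nexcept KeyError:\n    value = 0\n\n# LBYL: check before acting\nvalue = features['age'] if 'age' in features else 0\n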

    "},{"location":"faq/#what-about-reinforcement-learning","title":"What about reinforcement learning?","text":"

    Reinforcement learning works in an online manner because of the nature of the task. Reinforcement learning can therefore be seen as a special case of online machine learning. However, we prefer not to support it because there are already many existing open-source libraries dedicated to it.

    "},{"location":"faq/#what-are-the-differences-between-scikit-learns-online-learning-algorithm-which-have-a-partial_fit-method-and-their-equivalents-in-river","title":"What are the differences between scikit-learn's online learning algorithm which have a partial_fit method and their equivalents in River?","text":"

    The algorithms from sklearn that support incremental learning are mostly meant for mini-batch learning. In a pure streaming context, where the observations arrive one by one, River is much faster than sklearn. This is mostly because sklearn incurs a lot of overhead by performing data checks. Also, sklearn assumes that you're always using the same number of features. This is not the case with River, because it uses dictionaries, which allow you to drop and add features as you wish.
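
    A small sketch of what this permits: each sample may carry a different set of features and the model simply adapts.

    from river import linear_model\n\nmodel = linear_model.LinearRegression()\n\nmodel.learn_one({'a': 1.0, 'b': 2.0}, 1.0)\nmodel.learn_one({'b': 1.5, 'c': 0.5}, 0.0)  # 'a' dropped, 'c' appeared: no error\n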

    "},{"location":"faq/#how-do-i-save-and-load-models","title":"How do I save and load models?","text":"
    >>> from river import ensemble\n>>> import pickle\n\n>>> model = ensemble.AdaptiveRandomForestClassifier()\n\n# save\n>>> with open('model.pkl', 'wb') as f:\n...     pickle.dump(model, f)\n\n# load\n>>> with open('model.pkl', 'rb') as f:\n...     model = pickle.load(f)\n

    We also encourage you to try out dill and cloudpickle.

    "},{"location":"faq/#what-about-neural-networks","title":"What about neural networks?","text":"

    There are many great open-source libraries for building neural network models. We don't feel that we can bring anything of value to the existing Python ecosystem. However, we are open to implementing compatibility wrappers for popular libraries such as PyTorch and Keras.

    "},{"location":"faq/#who-are-the-authors-of-this-library","title":"Who are the authors of this library?","text":"

    We are research engineers, graduate students, PhDs and machine learning researchers. The members of the development team are mainly located in France, Brazil and New Zealand.

    "},{"location":"introduction/basic-concepts/","title":"Basic concepts","text":"

    Here are some concepts to give you a feel for what problems River addresses.

    "},{"location":"introduction/basic-concepts/#data-streams","title":"Data streams","text":"

    River is a library to build online machine learning models. Such models operate on data streams. But a data stream is a bit of a vague concept.

    In general, a data stream is a sequence of individual elements. In the case of machine learning, each element is a bunch of features. We call these samples, or observations. Each sample might follow a fixed structure and always contain the same features. But features can also appear and disappear over time. That depends on the use case.

    "},{"location":"introduction/basic-concepts/#reactive-and-proactive-data-streams","title":"Reactive and proactive data streams","text":"

    The origin of a data stream can vary, and usually it doesn't matter. You should be able to use River regardless of where your data comes from. It is however important to keep in mind the difference between reactive and proactive data streams.

    Reactive data streams are ones where the data comes to you. For instance, when a user visits your website, that's out of your control. You have no influence on the event. It just happens and you have to react to it.

    Proactive data streams are ones where you have control on the data stream. For example, you might be reading the data from a file. You decide at which speed you want to read the data, in what order, etc.

    If you consider data analysis as a whole, you'll realize that the general approach is to turn reactive streams into proactive datasets. Events are usually logged into a database and are processed offline, be it for building KPIs or for training models.

    The challenge for machine learning is to ensure models you train offline on proactive datasets will perform correctly in production on reactive data streams.

    "},{"location":"introduction/basic-concepts/#online-processing","title":"Online processing","text":"

    Online processing is the act of processing a data stream one element at a time. In the case of machine learning, that means training a model by teaching it one sample at a time. This is the complete opposite of the traditional way of doing machine learning, which is to train a model on a whole batch of data at a time.

    An online model is therefore a stateful, dynamic object. It keeps learning and doesn't have to revisit past data. It's a different way of doing things, and therefore has its own set of pros and cons.

    "},{"location":"introduction/basic-concepts/#tasks","title":"Tasks","text":"

    Machine learning encompasses many different tasks: classification, regression, anomaly detection, time series forecasting, etc. The philosophy behind River is to be a generic machine learning library which allows you to perform these tasks in a streaming manner. Indeed, many batch machine learning algorithms have online equivalents.

    Note that River also supports some more basic tasks. For instance, you might just want to calculate a running average of a data stream. These are usually smaller parts of a whole stream processing pipeline.

    "},{"location":"introduction/basic-concepts/#dictionaries-everywhere","title":"Dictionaries everywhere","text":"

    River is a Python library. It is composed of a bunch of classes which implement various online processing algorithms. Most of these classes are machine learning models which can process a single sample, be it for learning or for inference.

    We made the choice to use dictionaries as the basic building block. First of all, online processing is different from batch processing, in that vectorization doesn't bring any speedup. Therefore numeric processing libraries such as numpy and PyTorch actually bring too much overhead. Using native Python data structures is faster.

    Dictionaries are therefore a perfect fit. They're native to Python and have excellent support in the standard library. They allow naming each feature. They can hold any kind of data type. They also map transparently to JSON payloads, allowing seamless integration with web apps.
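
    For instance, a JSON payload coming from a web app deserializes directly into a sample a model can consume. Here is a small sketch, with made-up feature names:

    import json\n\npayload = '{\"https\": 1.0, \"long_url\": 0.0, \"popup_window\": 1.0}'\nx = json.loads(payload)  # a plain dict, ready to be fed to a River model\n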

    "},{"location":"introduction/basic-concepts/#datasets","title":"Datasets","text":"

    In production, you're almost always going to face data streams which you have to react to, such as users visiting your website. The advantage of online machine learning is that you can design models which make predictions as well as learn from this data stream as it flows.

    But of course, when you're developing a model, you don't usually have access to a real-time feed on which to evaluate it. You usually have an offline dataset on which you want to evaluate your model. River provides some datasets which can be read in an online manner, one sample at a time. It is however crucial to keep in mind that the goal is to reproduce a production scenario as closely as possible, in order to ensure your model will perform just as well in production.

    "},{"location":"introduction/basic-concepts/#model-evaluation","title":"Model evaluation","text":"

    Online model evaluation differs from its traditional batch counterpart. In the latter, you usually perform cross-validation, whereby your training dataset is split into a learning and an evaluation dataset. This is fine, but it doesn't exactly reflect the data generation process that occurs in production.

    Online model evaluation involves learning and inference in the same order as what would happen in production. Indeed, if you know the order in which your data arrives, then you can process it in the exact same order. This allows you to replay a production scenario and evaluate your model with higher fidelity than cross-validation.
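
    A minimal sketch of this predict-then-learn loop, using pieces covered later in the getting started section, might look like this:

    from river import datasets, linear_model, metrics\n\nmodel = linear_model.LogisticRegression()\nmetric = metrics.Accuracy()\n\nfor x, y in datasets.Phishing():\n    y_pred = model.predict_one(x)  # predict before the ground truth is revealed\n    model.learn_one(x, y)          # then learn from the ground truth\n    metric.update(y, y_pred)\n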

    This is what makes online machine learning powerful. By replaying datasets in the correct order, you ensure you are designing models which will perform as expected in production.

    "},{"location":"introduction/basic-concepts/#concept-drift","title":"Concept drift","text":"

    The main reason why an offline model might not perform as expected in production is because of concept drift. But this is true for all machine learning models, be they offline or online.

    The advantage of online models over offline models is that they can cope with drift. Indeed, because they can keep learning, they usually adapt to concept drift in a seamless manner, as opposed to batch models, which have to be retrained from scratch.

    "},{"location":"introduction/installation/","title":"Installation","text":"

    River is meant to work with Python 3.8 and above. Installation can be done via pip:

    pip install river\n

    You can install the latest development version from GitHub, like so:

    pip install git+https://github.com/online-ml/river --upgrade\n

    Or, through SSH:

    pip install git+ssh://git@github.com/online-ml/river.git --upgrade\n

    Feel welcome to open an issue on GitHub if you are having any trouble.

    "},{"location":"introduction/next-steps/","title":"Next steps","text":"

    The Recipes \ud83c\udf71 section is made up of small tutorials. Each one explains how to perform mundane tasks, such as measuring the performance of a model, selecting hyperparameters, etc.

    The Examples \ud83c\udf36\ufe0f section contains more involved notebooks with less explanations. Each notebook addresses a particular machine learning problem.

    The API \ud83d\udcda section references all the modules, classes, and functions in River. It is automatically generated from the codebase's Python docstrings.

    Feel welcome to open a discussion if you have a question. Before that you can check out the FAQ \ud83d\ude4b, which has answers to recurring questions.

    The released versions are listed in the Releases \ud83c\udfd7 section. Changes that will be part of the next release are listed in the unreleased section of the documentation's development version, which you may find here.

    We recommend checking out Awesome Online Machine Learning if you want to go deeper. There you will find online machine learning related content: research papers, alternative and complementary software, blog posts, etc.

    "},{"location":"introduction/related-projects/","title":"Related projects","text":"

    Here is a list of projects which are more or less coupled with River:

    • deep-river interfaces PyTorch models with River.
    • light-river implements fast algorithms in Rust.
    • river-extra groups together experimental features which have yet to prove themselves before they can make it into the main River repository. Between us we call this \"the arena\".
    • Beaver is an MLOps tool for covering the whole lifecycle of online machine learning models.
    "},{"location":"introduction/why-use-river/","title":"Why use River?","text":""},{"location":"introduction/why-use-river/#processing-one-sample-at-a-time","title":"Processing one sample at a time","text":"

    All the tools in the library can be updated with a single observation at a time. They can therefore be used to process streaming data. Depending on your use case, this might be more convenient than using a batch model.

    "},{"location":"introduction/why-use-river/#adapting-to-drift","title":"Adapting to drift","text":"

    In the streaming setting, data can evolve. Adaptive methods are specifically designed to be robust against concept drift in dynamic environments. Many of River's models can cope with concept drift.

    "},{"location":"introduction/why-use-river/#general-purpose","title":"General purpose","text":"

    River supports different machine learning tasks, including regression, classification, and unsupervised learning. It can also be used for ad hoc tasks, such as computing online metrics, as well as concept drift detection.

    "},{"location":"introduction/why-use-river/#user-experience","title":"User experience","text":"

    River is not the only library allowing you to do online machine learning. But it might just be the simplest one to use in the Python ecosystem. River plays nicely with Python dictionaries, making it easy to use in the context of web applications where JSON payloads are plentiful.

    "},{"location":"introduction/getting-started/binary-classification/","title":"Binary classification","text":"

    Classification is about predicting an outcome from a fixed list of classes. The prediction is a probability distribution that assigns a probability to each possible outcome.

    A labeled classification sample is made up of a bunch of features and a class. The class is a boolean in the case of binary classification. We'll use the phishing dataset as an example.

    from river import datasets\n\ndataset = datasets.Phishing()\ndataset\n
    Phishing websites.\n\nThis dataset contains features from web pages that are classified as phishing or not.\n\n    Name  Phishing                                                          \n    Task  Binary classification                                             \n Samples  1,250                                                             \nFeatures  9                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/phishing.csv.gz\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'empty_server_form_handler': 0.0,\n 'popup_window': 0.0,\n 'https': 0.0,\n 'request_from_other_domain': 0.0,\n 'anchor_from_other_domain': 0.0,\n 'is_popular': 0.5,\n 'long_url': 1.0,\n 'age_of_domain': 1,\n 'ip_in_url': 1}\n
    y\n
    True\n

    A binary classifier's goal is to learn to predict a binary target y from some given features x. We'll try to do this with a logistic regression.

    from river import linear_model\n\nmodel = linear_model.LogisticRegression()\nmodel.predict_proba_one(x)\n
    {False: 0.5, True: 0.5}\n

    The model hasn't been trained on any data, and therefore outputs a default probability of 50% for each class.

    The model can be trained on the sample, which will update the model's state.

    model = model.learn_one(x, y)\n

    If we try to make a prediction on the same sample, we can see that the probabilities are different, because the model has learned something.

    model.predict_proba_one(x)\n
    {False: 0.494687699901455, True: 0.505312300098545}\n

    Note that there is also a predict_one method if you're only interested in the most likely class rather than the probability distribution.

    model.predict_one(x)\n
    True\n

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = linear_model.LogisticRegression()\n\nmetric = metrics.ROCAUC()\n\nfor x, y in dataset:\n    y_pred = model.predict_proba_one(x)\n    model.learn_one(x, y)\n    metric.update(y, y_pred)\n\nmetric\n
    ROCAUC: 89.36%\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = linear_model.LogisticRegression()\nmetric = metrics.ROCAUC()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    ROCAUC: 89.36%\n

    A common way to improve the performance of a logistic regression is to scale the data. This can be done by using a preprocessing.StandardScaler. In particular, we can define a pipeline to organise our model into a sequence of steps:

    from river import compose\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LogisticRegression()\n)\n\nmodel\n
    StandardScaler
    StandardScaler ( with_std=True )
    LogisticRegression
    LogisticRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Log ( weight_pos=1. weight_neg=1. ) l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )
    metric = metrics.ROCAUC()\nevaluate.progressive_val_score(dataset, model, metric)\n
    ROCAUC: 95.07%\n
    "},{"location":"introduction/getting-started/concept-drift-detection/","title":"Concept drift","text":"

    In online machine learning, it is assumed that data can change over time. When building machine learning models, we assume data has a probability distribution, which is usually fixed, i.e., stationary. Changes in the data distribution give rise to the phenomenon called concept drift. Such drifts can be either virtual or real. In virtual drifts, only the distribution of the features, \(P(X)\), changes, whereas the relationship between \(X\) (features) and the target, \(y\), remains unchanged. In real concept drifts, the joint probability \(P(X, y)\) changes. Consequently, unsupervised online machine learning problems might face only virtual concept drifts.

    Real concept drifts can be further divided into abrupt (happening instantly at a given point) or gradual (one \"concept\" changes into another gradually). There are other possible divisions, but they can be fitted into abrupt or gradual drifts.

    "},{"location":"introduction/getting-started/concept-drift-detection/#examples-of-concept-drift","title":"Examples of concept drift","text":"

    Concept drifts might happen in the electricity demand across the year, in the stock market, in buying preferences, and in the likelihood of a new movie's success, among others.

    Let us consider the movie example: two movies made at different epochs can have similar features such as famous actors/directors, storyline, production budget, marketing campaigns, etc., yet it is not certain that both will be similarly successful. What the target audience considers worth watching (and worth their money) is constantly changing, and production companies must adapt accordingly to avoid \"box office flops\".

    Prior to the COVID-19 pandemic, the usage of hand sanitizers and facial masks was not widespread. When the cases of COVID-19 started increasing, there was a shortage of such products for the end consumer. Imagine a batch-learning model deciding how much of each product a supermarket should stock during those times. What a mess!

    "},{"location":"introduction/getting-started/concept-drift-detection/#impact-of-drift-on-learning","title":"Impact of drift on learning","text":"

    Concept drift can have a significant impact on predictive performance if not handled properly. Most batch learning models will fail in the presence of concept drift as they are essentially trained on different data. On the other hand, stream learning methods continuously update themselves and adapt to new concepts. Furthermore, drift-aware methods use change detection methods (a.k.a. drift detectors) to trigger mitigation mechanisms if a change in performance is detected.

    "},{"location":"introduction/getting-started/concept-drift-detection/#detecting-concept-drift","title":"Detecting concept drift","text":"

    Multiple drift detection methods have been proposed. The goal of a drift detector is to signal an alarm in the presence of drift. A good drift detector maximizes the number of true positives while keeping the number of false positives to a minimum. It must also be resource-efficient to work in the context of infinite data streams.

    For this example, we will generate a synthetic data stream by concatenating 3 distributions of 1000 samples each:

    • \\(dist_a\\): \\(\\mu=0.8\\), \\(\\sigma=0.05\\)
    • \\(dist_b\\): \\(\\mu=0.4\\), \\(\\sigma=0.02\\)
    • \\(dist_c\\): \\(\\mu=0.6\\), \\(\\sigma=0.1\\).
    import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\n# Generate data for 3 distributions\nrandom_state = np.random.RandomState(seed=42)\ndist_a = random_state.normal(0.8, 0.05, 1000)\ndist_b = random_state.normal(0.4, 0.02, 1000)\ndist_c = random_state.normal(0.6, 0.1, 1000)\n\n# Concatenate data to simulate a data stream with 2 drifts\nstream = np.concatenate((dist_a, dist_b, dist_c))\n\n# Auxiliary function to plot the data\ndef plot_data(dist_a, dist_b, dist_c, drifts=None):\n    fig = plt.figure(figsize=(7,3), tight_layout=True)\n    gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])\n    ax1, ax2 = plt.subplot(gs[0]), plt.subplot(gs[1])\n    ax1.grid()\n    ax1.plot(stream, label='Stream')\n    ax2.grid(axis='y')\n    ax2.hist(dist_a, label=r'$dist_a$')\n    ax2.hist(dist_b, label=r'$dist_b$')\n    ax2.hist(dist_c, label=r'$dist_c$')\n    if drifts is not None:\n        for drift_detected in drifts:\n            ax1.axvline(drift_detected, color='red')\n    plt.show()\n\nplot_data(dist_a, dist_b, dist_c)\n

    "},{"location":"introduction/getting-started/concept-drift-detection/#drift-detection-test","title":"Drift detection test","text":"

    We will use the ADaptive WINdowing (ADWIN) drift detection method. Remember that the goal is to indicate that drift has occurred after samples 1000 and 2000 in the synthetic data stream.

    from river import drift\n\ndrift_detector = drift.ADWIN()\ndrifts = []\n\nfor i, val in enumerate(stream):\n    drift_detector.update(val)   # Data is processed one sample at a time\n    if drift_detector.drift_detected:\n        # The drift detector indicates after each sample if there is a drift in the data\n        print(f'Change detected at index {i}')\n        drifts.append(i)\n\nplot_data(dist_a, dist_b, dist_c, drifts)\n
    Change detected at index 1055\nChange detected at index 2079\n

    We see that ADWIN successfully indicates the presence of drift (red vertical lines) close to the beginning of a new data distribution.

    We conclude this example with some remarks regarding concept drift detectors and their usage:

    • In practice, drift detectors provide stream learning methods with robustness against concept drift. Drift detectors usually monitor the model through a performance metric, as shown in the sketch after this list.
    • Drift detectors work on univariate data. This is why they are used to monitor a model's performance and not the data itself. Remember that concept drift is defined as a change in the relationship between data and the target to learn (in supervised learning).
    • Drift detectors define their expectations regarding input data. It is important to know these expectations to feed a given drift detector with the correct data.
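
    As a minimal sketch of the first two points, one may feed a model's per-sample correctness, which is a univariate binary signal, into a detector such as ADWIN:

    from river import datasets, drift, linear_model\n\nmodel = linear_model.LogisticRegression()\ndetector = drift.ADWIN()\n\nfor x, y in datasets.Phishing():\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    detector.update(int(y_pred == y))  # monitor the model's correctness, not the raw data\n    if detector.drift_detected:\n        print('Performance drift detected')\n
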
    "},{"location":"introduction/getting-started/multiclass-classification/","title":"Multi-class classification","text":"

    Classification is about predicting an outcome from a fixed list of classes. The prediction is a probability distribution that assigns a probability to each possible outcome.

    A labeled classification sample is made up of a bunch of features and a class. The class is usually a string or a number in the case of multiclass classification. We'll use the image segments dataset as an example.

    from river import datasets\n\ndataset = datasets.ImageSegments()\ndataset\n
    Image segments classification.\n\nThis dataset contains features that describe image segments into 7 classes: brickface, sky,\nfoliage, cement, window, path, and grass.\n\n    Name  ImageSegments                                                     \n    Task  Multi-class classification                                        \n Samples  2,310                                                             \nFeatures  18                                                                \n Classes  7                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/segment.csv.zip\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'region-centroid-col': 218,\n 'region-centroid-row': 178,\n 'short-line-density-5': 0.11111111,\n 'short-line-density-2': 0.0,\n 'vedge-mean': 0.8333326999999999,\n 'vegde-sd': 0.54772234,\n 'hedge-mean': 1.1111094,\n 'hedge-sd': 0.5443307,\n 'intensity-mean': 59.629630000000006,\n 'rawred-mean': 52.44444300000001,\n 'rawblue-mean': 75.22222,\n 'rawgreen-mean': 51.22222,\n 'exred-mean': -21.555555,\n 'exblue-mean': 46.77778,\n 'exgreen-mean': -25.222220999999998,\n 'value-mean': 75.22222,\n 'saturation-mean': 0.31899637,\n 'hue-mean': -2.0405545}\n
    y\n
    'path'\n

    A multiclass classifier's goal is to learn how to predict a class y from a bunch of features x. We'll attempt to do this with a decision tree.

    from river import tree\n\nmodel = tree.HoeffdingTreeClassifier()\nmodel.predict_proba_one(x)\n
    {}\n

    The reason why the output dictionary is empty is because the model hasn't seen any data yet. It isn't aware of the dataset whatsoever. If this were a binary classifier, then it would output a probability of 50% for True and False because the classes are implicit. But in this case we're doing multiclass classification.

    Likewise, the predict_one method initially returns None because the model hasn't seen any labeled data yet.

    print(model.predict_one(x))\n
    None\n

    If we update the model and try again, then we see that a probability of 100% is assigned to the 'path' class because that's the only one the model is aware of.

    model.learn_one(x, y)\nmodel.predict_proba_one(x)\n
    {'path': 1.0}\n

    This is a strength of online classifiers: they're able to deal with new classes appearing in the data stream.

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = tree.HoeffdingTreeClassifier()\n\nmetric = metrics.ClassificationReport()\n\nfor x, y in dataset:\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    if y_pred is not None:\n        metric.update(y, y_pred)\n\nmetric\n
                   Precision      Recall         F1             Support\n\n   brickface         77.13%         84.85%         80.81%            330  \n      cement         78.92%         83.94%         81.35%            330  \n     foliage         65.69%         20.30%         31.02%            330  \n       grass        100.00%         96.97%         98.46%            330  \n        path         90.63%         91.19%         90.91%            329  \n         sky         99.08%         98.18%         98.63%            330  \n      window         43.50%         67.88%         53.02%            330\n\n       Macro         79.28%         77.62%         76.31%                 \n       Micro         77.61%         77.61%         77.61%                 \n    Weighted         79.27%         77.61%         76.31%\n\n                             77.61% accuracy\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = tree.HoeffdingTreeClassifier()\nmetric = metrics.ClassificationReport()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
                   Precision      Recall         F1             Support\n\n   brickface         77.13%         84.85%         80.81%            330  \n      cement         78.92%         83.94%         81.35%            330  \n     foliage         65.69%         20.30%         31.02%            330  \n       grass        100.00%         96.97%         98.46%            330  \n        path         90.63%         91.19%         90.91%            329  \n         sky         99.08%         98.18%         98.63%            330  \n      window         43.50%         67.88%         53.02%            330\n\n       Macro         79.28%         77.62%         76.31%                 \n       Micro         77.61%         77.61%         77.61%                 \n    Weighted         79.27%         77.61%         76.31%\n\n                             77.61% accuracy\n
    "},{"location":"introduction/getting-started/regression/","title":"Regression","text":"

    Regression is about predicting a numeric output for a given sample. A labeled regression sample is made up of a bunch of features and a number. The number is usually continuous, but it may also be discrete. We'll use the Trump approval rating dataset as an example.

    from river import datasets\n\ndataset = datasets.TrumpApproval()\ndataset\n
    Donald Trump approval ratings.\n\nThis dataset was obtained by reshaping the data used by FiveThirtyEight for analyzing Donald\nTrump's approval ratings. It contains 5 features, which are approval ratings collected by\n5 polling agencies. The target is the approval rating from FiveThirtyEight's model. The goal of\nthis task is to see if we can reproduce FiveThirtyEight's model.\n\n    Name  TrumpApproval                                                           \n    Task  Regression                                                              \n Samples  1,001                                                                   \nFeatures  6                                                                       \n  Sparse  False                                                                   \n    Path  /Users/max/projects/online-ml/river/river/datasets/trump_approval.csv.gz\n

    This dataset is a streaming dataset which can be looped over.

    for x, y in dataset:\n    pass\n

    Let's take a look at the first sample.

    x, y = next(iter(dataset))\nx\n
    {'ordinal_date': 736389,\n 'gallup': 43.843213,\n 'ipsos': 46.19925042857143,\n 'morning_consult': 48.318749,\n 'rasmussen': 44.104692,\n 'you_gov': 43.636914000000004}\n

    A regression model's goal is to learn to predict a numeric target y from a bunch of features x. We'll attempt to do this with a nearest neighbors model.

    from river import neighbors\n\nmodel = neighbors.KNNRegressor()\nmodel.predict_one(x)\n
    0.0\n

    The model hasn't been trained on any data, and therefore outputs a default value of 0.

    The model can be trained on the sample, which will update the model's state.

    model = model.learn_one(x, y)\n

    If we try to make a prediction on the same sample, we can see that the output is different, because the model has learned something.

    model.predict_one(x)\n
    43.75505\n

    Typically, an online model makes a prediction, and then learns once the ground truth reveals itself. The prediction and the ground truth can be compared to measure the model's correctness. If you have a dataset available, you can loop over it, make a prediction, update the model, and compare the model's output with the ground truth. This is called progressive validation.

    from river import metrics\n\nmodel = neighbors.KNNRegressor()\n\nmetric = metrics.MAE()\n\nfor x, y in dataset:\n    y_pred = model.predict_one(x)\n    model.learn_one(x, y)\n    metric.update(y, y_pred)\n\nmetric\n
    MAE: 0.310353\n

    This is a common way to evaluate an online model. In fact, there is a dedicated evaluate.progressive_val_score function that does this for you.

    from river import evaluate\n\nmodel = neighbors.KNNRegressor()\nmetric = metrics.MAE()\n\nevaluate.progressive_val_score(dataset, model, metric)\n
    MAE: 0.310353\n
    "},{"location":"recipes/active-learning/","title":"Active learning","text":"

    Active learning is a training regime, where the goal is to fit a model on the most discriminative samples. It is usually applied in situations where a limited amount of labeled data is available. In such a case, a human might be asked to annotate a sample. Doing this is expensive, so it's important to ask for labels for the samples that will have the most impact.

    Online active learning is active learning done in a streaming fashion. Every time a prediction is made, an active learning strategy decides whether a label should be asked for or not. If the strategy decides yes, then the system can ask for a human to intervene. This is well summarized in the following schema from Online Active Learning Methods for Fast Label-Efficient Spam Filtering.

    "},{"location":"recipes/active-learning/#online-active-learning","title":"Online active learning","text":"

    River's online active learning strategies are located in the active module. The latter contains wrapper models. These wrappers enrich the predict_one and predict_proba_one methods to include a boolean in the output.

    The returned boolean indicates whether or not a label should be asked for. In a production system, we could feed this to a web interface, and get the human to annotate the sample. Offline, we can simply use the label in the dataset.

    We'll implement this basic flow. We'll apply a TFIDF followed by a logistic regression to a dataset of spam/ham SMS messages.

    from river import active\nfrom river import datasets\nfrom river import feature_extraction\nfrom river import linear_model\nfrom river import metrics\n\ndataset = datasets.SMSSpam()\nmetric = metrics.Accuracy()\nmodel = (\n    feature_extraction.TFIDF(on='body') |\n    linear_model.LogisticRegression()\n)\nmodel = active.EntropySampler(model, seed=42)\n\nn_samples_used = 0\nfor x, y in dataset:\n    y_pred, ask = model.predict_one(x)\n    metric.update(y, y_pred)\n    if ask:\n        n_samples_used += 1\n        model.learn_one(x, y)\n\nmetric\n
    Accuracy: 86.60%\n

    The performance is reasonable, even though the entire dataset wasn't used for training. We can check how many samples were actually used.

    print(f\"{n_samples_used} / {dataset.n_samples} = {n_samples_used / dataset.n_samples:.2%}\")\n
    1921 / 5574 = 34.46%\n

    Note that the above logic can be succinctly reproduced with the progressive_val_score function from the evaluate module. It recognises when an active learning model is provided, and will automatically display the number of samples used.

    from river import evaluate\n\nevaluate.progressive_val_score(\n    dataset=dataset,\n    model=model.clone(),\n    metric=metric.clone(),\n    print_every=1000\n)\n
    [1,000] Accuracy: 86.32% \u2013 661 samples used\n[2,000] Accuracy: 86.44% \u2013 1,057 samples used\n[3,000] Accuracy: 86.52% \u2013 1,339 samples used\n[4,000] Accuracy: 86.62% \u2013 1,568 samples used\n[5,000] Accuracy: 86.57% \u2013 1,790 samples used\n[5,574] Accuracy: 86.60% \u2013 1,921 samples used\n\n\n\n\n\nAccuracy: 86.60%\n
    "},{"location":"recipes/active-learning/#reduce-training-time","title":"Reduce training time","text":"

    Active learning is primarily used to label data in an efficient manner. However, in an online setting, active learning can also be used simply to speed up training. The point is that you can achieve very good performance without training on an entire dataset. Active learning is a powerful way to decide which samples to train on.

    "},{"location":"recipes/active-learning/#_1","title":"Active learning","text":""},{"location":"recipes/active-learning/#production-considerations","title":"Production considerations","text":"

    In production, you might want to deploy a system where humans may annotate samples queried by an active learning strategy. You have several options at your disposal, all of which go beyond the scope of River.

    The general idea is to have some kind of queue into which queried samples are fed. Then you would have a user interface which displays the elements in the queue one by one. Each time a sample is labeled, the label would be used to update the model. You might have one or more threads/processes doing inference, in which case you'll want to propagate each model update to every one of them.
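
    Here is a hypothetical sketch of such a flow, using Python's built-in queue module; the infer and on_label functions are made-up names, not part of River:

    import queue\n\nto_label = queue.Queue()\n\ndef infer(model, x):\n    # called by the serving thread(s)\n    y_pred, ask = model.predict_one(x)\n    if ask:\n        to_label.put(x)  # queried samples pile up, waiting for human annotation\n    return y_pred\n\ndef on_label(model, x, y):\n    # called whenever a human annotates a queued sample\n    model.learn_one(x, y)\n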

    "},{"location":"recipes/bandits-101/","title":"Multi-armed bandits","text":"

    River has a bandit module. It contains several multi-armed bandit policies, bandit environments, and utilities to benchmark policies on bandit problems.

    Bandit environments in River implement the Gym interface. You can thus load them with gym.make. Note that Gym is intended for reinforcement learning algorithms, while bandit policies are the simplest form of reinforcement learning. Bandit policies learn by receiving a reward after each step, while reinforcement learning algorithms have to learn from feedback that may arrive at the end of a (long) sequence of steps.

    import gym\n\nfor k in gym.envs.registry:\n    if k.startswith('river_bandits'):\n        print(k)\n

    River's bandit module offers the bandit.evaluate function to benchmark several policies on a given environment. It takes as input a list of bandit policies, a bandit environment (the problem to solve), and a reward object.

    import gym\nfrom river import bandit\nimport pandas as pd\nfrom tqdm import tqdm\nfrom river import stats\n\npolicies=[\n    bandit.EpsilonGreedy(epsilon=0.1),\n    bandit.EpsilonGreedy(epsilon=0.01),\n    bandit.EpsilonGreedy(epsilon=0),\n]\n\nenv = gym.make(\n    'river_bandits/KArmedTestbed-v0',\n    max_episode_steps=1000\n)\n\ntrace = bandit.evaluate(\n    policies=policies,\n    env=env,\n    reward_stat=stats.Mean(),\n    n_episodes=(n_episodes := 2000),\n)\n

    The bandit.evaluate function returns a generator containing the results at each step of the benchmark. This can be wrapped with a pandas.DataFrame to gather all the results.

    trace_df = pd.DataFrame(tqdm(\n    trace, position=0, total=(\n        n_episodes *\n        len(policies) *\n        env._max_episode_steps\n    )\n))\ntrace_df.sample(5, random_state=42)\n
      0%|                                               | 0/6000000 [00:00<?, ?it/s]/Users/max/.pyenv/versions/3.10/envs/river310/lib/python3.10/site-packages/gym/utils/passive_env_checker.py:233: DeprecationWarning: `np.bool8` is a deprecated alias for `np.bool_`.  (Deprecated NumPy 1.24)\n  if not isinstance(terminated, (bool, np.bool8)):\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 6000000/6000000 [00:35<00:00, 169408.20it/s]\n
             episode  step  policy_idx  arm    reward  reward_stat\n1324896      441   632           0    4 -0.036801     0.457068\n3566176     1188   725           1    5  1.837321     2.220956\n1109043      369   681           0    6  0.616991     1.324610\n4286042     1428   680           2    3  0.236458     0.570999\n5395174     1798   391           1    1 -0.851223     0.446835\n

    It is then straightforward to plot the average reward each policy obtains at each step, by averaging over episodes.

    policy_names = {\n    0: '\u03b5 = 0.1',\n    1: '\u03b5 = 0.01',\n    2: '\u03b5 = 0 (greedy)'\n}\n\ncolors = {\n    '\u03b5 = 0.1': 'tab:blue',\n    '\u03b5 = 0.01': 'tab:red',\n    '\u03b5 = 0 (greedy)': 'tab:green'\n}\n\n(\n    trace_df\n    .assign(policy=trace_df.policy_idx.map(policy_names))\n    .groupby(['step', 'policy'])\n    ['reward'].mean()\n    .unstack()\n    .plot(color=colors)\n)\n
    <Axes: xlabel='step'>\n

    "},{"location":"recipes/bandits-101/#controlling-the-evaluation-loop","title":"Controlling the evaluation loop","text":"

    The bandit.evaluate function is useful for benchmarking. But in practice, you'll want to have control over your bandit policy. Indeed you'll want the freedom to pull arms (with the pull method) and update the policy (with the update method) at your discretion.

    As an example, the following is a possible reimplementation of the bandit.evaluate function. Here we'll be measuring the rate at which each policy selects the optimal arm.

    Note how the pull and update methods are used.

    import copy\n\npolicies=[\n    bandit.EpsilonGreedy(epsilon=0.1),\n    bandit.EpsilonGreedy(epsilon=0.01),\n    bandit.EpsilonGreedy(epsilon=0),\n]\n\nenv = gym.make(\n    'river_bandits/KArmedTestbed-v0',\n    max_episode_steps=1000\n)\nn_episodes = 2000\n\ntrace = []\n\nwith tqdm(total=len(policies) * n_episodes * env._max_episode_steps, position=0) as progress:\n    for policy in policies:\n        for episode in range(n_episodes):\n            episode_policy = policy.clone()\n            episode_env = copy.deepcopy(env)\n            episode_env.reset()\n            step = 0\n            while True:\n                action = episode_policy.pull(range(episode_env.action_space.n))\n                observation, reward, terminated, truncated, info = episode_env.step(action)\n                best_action = observation\n                episode_policy.update(action, reward)\n\n                trace.append({\n                    \"episode\": episode,\n                    \"step\": step,\n                    \"policy\": f\"\u03b5 = {policy.epsilon}\",\n                    \"is_action_optimal\": action == best_action\n                })\n                step += 1\n                progress.update()\n\n                if terminated or truncated:\n                    break\n\ntrace_df = pd.DataFrame(trace)\n
      0%|                                               | 0/6000000 [00:00<?, ?it/s]/Users/max/.pyenv/versions/3.10/envs/river310/lib/python3.10/site-packages/gym/utils/passive_env_checker.py:233: DeprecationWarning: `np.bool8` is a deprecated alias for `np.bool_`.  (Deprecated NumPy 1.24)\n  if not isinstance(terminated, (bool, np.bool8)):\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 6000000/6000000 [00:34<00:00, 172261.07it/s]\n
    colors = {\n    '\u03b5 = 0.1': 'tab:blue',\n    '\u03b5 = 0.01': 'tab:red',\n    '\u03b5 = 0': 'tab:green'\n}\n\n(\n    trace_df\n    .groupby(['step', 'policy'])\n    ['is_action_optimal'].mean()\n    .unstack()\n    .plot(color=colors)\n)\n
    <Axes: xlabel='step'>\n

    "},{"location":"recipes/bandits-101/#handling-drift","title":"Handling drift","text":"

    The environment used above is a toy situation used for introducing bandits. It is stationary, meaning that the expected reward of each arm does not change over time.

    In practice, arms are dynamic, and their performance can vary over time. A simple example of this is the Candy Cane Contest that was hosted on Kaggle in 2020. The expected reward of each arm diminishes each time it is pulled.

    The way bandit policies in River deal with drift depends on the method. For the bandit.EpsilonGreedy policy, it makes sense to use a rolling average as the reward object. What this means is that the empirical reward the policy calculates for each arm is a rolling average, rather than a global one.

    from river import proba, utils\n\npolicies=[\n    bandit.EpsilonGreedy(\n        epsilon=0.1,\n        seed=42\n    ),\n    bandit.EpsilonGreedy(\n        epsilon=0.3,\n        reward_obj=utils.Rolling(stats.Mean(), window_size=50),\n        seed=42\n    ),\n    bandit.ThompsonSampling(\n        reward_obj=proba.Beta(),\n        seed=42\n    )\n]\n\nenv = gym.make('river_bandits/CandyCaneContest-v0')\n\ntrace = bandit.evaluate(\n    policies=policies,\n    env=env,\n    n_episodes=(n_episodes := 30),\n    seed=42\n)\n\ntrace_df = pd.DataFrame(tqdm(\n    trace, position=0, total=(\n        n_episodes *\n        len(policies) *\n        env._max_episode_steps\n    )\n))\n
    100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 180000/180000 [00:14<00:00, 12305.42it/s]\n

    We can compare the performance of each policy by checking the average reward at the end of each episode.

    (\n    trace_df\n    .groupby(['policy_idx', 'episode'])\n    .last()\n    .groupby('policy_idx')\n    .reward_stat.mean()\n)\n
    policy_idx\n0    736.1\n1    817.0\n2    854.0\nName: reward_stat, dtype: float64\n

    We see that using a rolling average gives a boost to the epsilon greedy strategy. However, we see that the bandit.ThompsonSampling policy performs even better, even though no particular care was given to drift. A natural next step would thus be to see how it could be improved to handle drift. For instance, its dist parameter could be wrapped with a utils.Rolling:

    policy = bandit.ThompsonSampling(\n    reward_obj=utils.Rolling(proba.Beta(), window_size=50),\n    seed=42\n)\n

    Bandits can be used for several tasks. They can be used for content personalization, as well as online model selection (see model_selection.BanditRegressor). The policies in River are therefore designed to be flexible, so that they can be used in conjunction with other River modules. For instance, the reward_obj in bandit.EpsilonGreedy can be a metric, a probability distribution, or a statistic. This works because objects in River adhere to a coherent get/update interface.
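
    For instance, here is a small sketch showing that a metric and a statistic can be used interchangeably, precisely because they share this interface:

    from river import metrics, stats\n\nacc = metrics.Accuracy()\nacc.update(y_true=True, y_pred=True)\nacc.get()  # 1.0\n\nmean = stats.Mean()\nmean.update(1)\nmean.get()  # 1.0\n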

    "},{"location":"recipes/cloning-and-mutating/","title":"Cloning and mutating","text":"

    Sometimes you might want to reset a model, or edit (what we call mutate) its attributes. This can be useful in an online environment. Indeed, if you detect a drift, then you might want to mutate a model's attributes. Or if you see that a model's performance is plummeting, then you might want to reset it to its \"factory settings\".

    Anyway, this is not to convince you, but rather to say that a model's attributes don't have to be set in stone throughout its lifetime. In particular, if you're developing your own model, then you might want to have good tools to do this. This is what this recipe is about.

    "},{"location":"recipes/cloning-and-mutating/#cloning","title":"Cloning","text":"

    The first thing you can do is clone a model. This creates a deep copy of the model. The resulting model is entirely independent of the original model. The clone is fresh, in the sense that it is as if it hasn't seen any data.

    For instance, say you have a linear regression model which you have trained on some data.

    from river import datasets, linear_model, optim, preprocessing\n\nmodel = (\n    preprocessing.StandardScaler() |\n    linear_model.LinearRegression(\n        optimizer=optim.SGD(3e-2)\n    )\n)\n\nfor x, y in datasets.TrumpApproval():\n    model.predict_one(x)\n    model.learn_one(x, y)\n\nmodel[-1].weights\n
    {'ordinal_date': 20.59955380229643,\n 'gallup': 0.39114944304212645,\n 'ipsos': 0.4101918314868111,\n 'morning_consult': 0.12042970179504908,\n 'rasmussen': 0.18951231512561392,\n 'you_gov': 0.04991712783831687}\n

    For whatever reason, we may want to clone this model. This can be done with the clone method.

    clone = model.clone()\nclone[-1].weights\n
    {}\n

    As we can see, there are no weights because the clone is a fresh copy that has not seen any data. However, the learning rate we specified is preserved.

    clone[-1].optimizer.learning_rate\n
    0.03\n

    You may also specify parameters you want changed. For instance, let's say we want to clone the model, but we want to change the optimizer:

    clone = model.clone({\"LinearRegression\": {\"optimizer\": optim.Adam()}})\nclone[-1].optimizer\n
    Adam({'lr': Constant({'learning_rate': 0.1}), 'n_iterations': 0, 'beta_1': 0.9, 'beta_2': 0.999, 'eps': 1e-08, 'm': None, 'v': None})\n

    The first key indicates that we want to specify a different parameter for the LinearRegression part of the pipeline. Then the second key accesses the linear regression's optimizer parameter.

    Finally, note that the clone method isn't reserved to models. Indeed, every object in River has it. That's because they all inherit from the Base class in the base module.

    "},{"location":"recipes/cloning-and-mutating/#mutating-attributes","title":"Mutating attributes","text":"

    Cloning a model can be useful, but the fact that it essentially resets the model may not be desired. Instead, you might want to change an attribute while preserving the model's state. For example, let's change the l2 attribute, and the optimizer's lr attribute.

    model.mutate({\n    \"LinearRegression\": {\n        \"l2\": 0.1,\n        \"optimizer\": {\n            \"lr\": optim.schedulers.Constant(25e-3)\n        }\n    }\n})\n\nprint(repr(model))\n
    Pipeline (\n  StandardScaler (\n    with_std=True\n  ),\n  LinearRegression (\n    optimizer=SGD (\n      lr=Constant (\n        learning_rate=0.025\n      )\n    )\n    loss=Squared ()\n    l2=0.1\n    l1=0.\n    intercept_init=0.\n    intercept_lr=Constant (\n      learning_rate=0.01\n    )\n    clip_gradient=1e+12\n    initializer=Zeros ()\n  )\n)\n

    We can see the attributes we specified have changed. However, the model's state is preserved:

    model[-1].weights\n
    {'ordinal_date': 20.59955380229643,\n 'gallup': 0.39114944304212645,\n 'ipsos': 0.4101918314868111,\n 'morning_consult': 0.12042970179504908,\n 'rasmussen': 0.18951231512561392,\n 'you_gov': 0.04991712783831687}\n

    In other words, the mutate method does not create a deep copy of the model. It just sets attributes. At this point you may ask:

    Why can't I just change the attribute directly, without calling mutate?

    The answer is that you're free to proceed as such, but it's not the way we recommend. The mutate method is safer, in that it prevents you from mutating attributes you shouldn't be touching. We call these immutable attributes. For instance, there's no reason you should be modifying the weights.

    try:\n    model.mutate({\n        \"LinearRegression\": {\n            \"weights\": \"this makes no sense\"\n        }\n    })\nexcept ValueError as e:\n    print(e)\n
    'weights' is not a mutable attribute of LinearRegression\n

    All attributes are immutable by default. Under the hood, each model can specify a set of mutable attributes via the _mutable_attributes property. In theory this can be overridden. But the general idea is that we will progressively add more and more mutable attributes over time.
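
    As a rough sketch of what this looks like for a custom model, with MyModel and alpha being made-up names:

    from river import base\n\nclass MyModel(base.Estimator):\n\n    def __init__(self, alpha=0.5):\n        self.alpha = alpha\n\n    @property\n    def _mutable_attributes(self):\n        # mutate() will only accept the attributes listed here\n        return {'alpha'}\n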

    And that concludes this recipe. Arguably, this recipe caters to advanced users, and in particular users who are developing their own models. And yet, one could also argue that modifying parameters of a model on the fly is a great tool to have at your disposal when you're doing online machine learning.

    "},{"location":"recipes/feature-extraction/","title":"Feature extraction","text":"

    To do.

    "},{"location":"recipes/hyperparameter-tuning/","title":"Hyperparameter tuning","text":"

    To do.

    "},{"location":"recipes/mini-batching/","title":"Mini-batching","text":"

    In its purest form, online machine learning encompasses models which learn with one sample at a time. This is the design which is used in River.

    The main downside of single-instance processing is that it doesn't scale to big data, at least not in the sense of traditional batch learning. Indeed, processing one sample at a time means that we are unable to fully take advantage of vectorisation and other computational tools that are taken for granted in batch learning. On top of this, processing a large dataset in River essentially involves a Python for loop, which might be too slow for some use cases. However, this doesn't mean that River is slow. In fact, for processing a single instance, River is actually a couple of orders of magnitude faster than libraries such as scikit-learn, PyTorch, and Tensorflow. The reason is that River is designed from the ground up to process a single instance, whereas the majority of other libraries choose to care about batches of data. Both approaches offer different compromises, and the best choice depends on your use case.

    In order to propose the best of both worlds, River offers some limited support for mini-batch learning. Some of River's estimators implement *_many methods on top of their *_one counterparts. For instance, preprocessing.StandardScaler has a learn_many method as well as a transform_many method, in addition to learn_one and transform_one. Each mini-batch method takes as input a pandas.DataFrame. Supervised estimators also take as input a pandas.Series of target values. We choose to use pandas.DataFrames over numpy.ndarrays because of the simple fact that the former allows us to name each feature. This in turn allows us to offer a uniform interface for both single instance and mini-batch learning.
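
    As a small illustration, with a made-up DataFrame:

    import pandas as pd\nfrom river import preprocessing\n\nscaler = preprocessing.StandardScaler()\n\nX = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})\nscaler.learn_many(X)       # update the running statistics from a whole mini-batch\nscaler.transform_many(X)   # scale a whole mini-batch at once\n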

    As an example, we will build a simple pipeline that scales the data and trains a logistic regression. Indeed, the compose.Pipeline class can be applied to mini-batches, as long as each step is able to do so.

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    linear_model.LogisticRegression()\n)\n

    For this example, we will use datasets.Higgs.

    from river import datasets\n\ndataset = datasets.Higgs()\nif not dataset.is_downloaded:\n    dataset.download()\ndataset\n
    Higgs dataset.\n\nThe data has been produced using Monte Carlo simulations. The first 21 features (columns 2-22)\nare kinematic properties measured by the particle detectors in the accelerator. The last seven\nfeatures are functions of the first 21 features; these are high-level features derived by\nphysicists to help discriminate between the two classes.\n\n      Name  Higgs                                                                       \n      Task  Binary classification                                                       \n   Samples  11,000,000                                                                  \n  Features  28                                                                          \n    Sparse  False                                                                       \n      Path  /Users/max/river_data/Higgs/HIGGS.csv.gz                                    \n       URL  https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz\n      Size  2.62 GB                                                                     \nDownloaded  True\n

    The easiest way to read the data in a mini-batch fashion is to use the read_csv function from pandas.

    import pandas as pd\n\nnames = [\n    'target', 'lepton pT', 'lepton eta', 'lepton phi',\n    'missing energy magnitude', 'missing energy phi',\n    'jet 1 pt', 'jet 1 eta', 'jet 1 phi', 'jet 1 b-tag',\n    'jet 2 pt', 'jet 2 eta', 'jet 2 phi', 'jet 2 b-tag',\n    'jet 3 pt', 'jet 3 eta', 'jet 3 phi', 'jet 3 b-tag',\n    'jet 4 pt', 'jet 4 eta', 'jet 4 phi', 'jet 4 b-tag',\n    'm_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'\n]\n\nfor x in pd.read_csv(dataset.path, names=names, chunksize=8096, nrows=3e5):\n    y = x.pop('target')\n    y_pred = model.predict_proba_many(x)\n    model.learn_many(x, y)\n

    If you are familiar with scikit-learn, you might be aware that some of their estimators have a partial_fit method, which is similar to River's learn_many method. Here are some advantages that River has over scikit-learn:

    • We guarantee that River is just as fast, if not faster, than scikit-learn. The differences are negligible, but are slightly in favor of River.
    • We take as input dataframes, which allows us to name each feature. The benefit is that you can add/remove/permute features between batches and everything will keep working.
    • Estimators that support mini-batches also support single instance learning. This means that you can enjoy the best of both worlds. For instance, you can train with mini-batches and use predict_one to make predictions.

    Note that you can check which estimators can process mini-batches programmatically:

    import importlib\nimport inspect\n\ndef can_mini_batch(obj):\n    return hasattr(obj, 'learn_many')\n\nfor module in importlib.import_module('river.api').__all__:\n    if module in ['datasets', 'synth']:\n        continue\n    for name, obj in inspect.getmembers(importlib.import_module(f'river.{module}'), can_mini_batch):\n        print(name)\n
    OneClassSVM\nMiniBatchClassifier\nMiniBatchRegressor\nMiniBatchSupervisedTransformer\nMiniBatchTransformer\nSKL2RiverClassifier\nSKL2RiverRegressor\nFuncTransformer\nPipeline\nSelect\nTransformerProduct\nTransformerUnion\nBagOfWords\nTFIDF\nLinearRegression\nLogisticRegression\nPerceptron\nOneVsRestClassifier\nBernoulliNB\nComplementNB\nMultinomialNB\nMLPRegressor\nOneHotEncoder\nOrdinalEncoder\nStandardScaler\n

    Because mini-batch learning isn't treated as a first-class citizen, some of River's functionalities require some work in order to play nicely with mini-batches. For instance, the objects from the metrics module have an update method that takes as input a single pair (y_true, y_pred). This might change in the future, depending on the demand.
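
    In the meantime, one workaround is to update the metric row by row. Here is a sketch that reuses the pipeline, dataset, and names from the snippet above:

    import pandas as pd\nfrom river import metrics\n\nmetric = metrics.Accuracy()\n\nfor x in pd.read_csv(dataset.path, names=names, chunksize=8096, nrows=3e5):\n    y = x.pop('target')\n    y_pred = model.predict_many(x)  # a pandas.Series of predictions\n    for yt, yp in zip(y, y_pred):\n        metric.update(yt, yp)       # metrics consume one pair at a time\n    model.learn_many(x, y)\n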

    We plan to promote more models to the mini-batch regime. However, we will only be doing so for the methods that benefit the most from it, as well as those that are most popular. Indeed, River's core philosophy will remain to cater to single instance learning.

    "},{"location":"recipes/model-evaluation/","title":"Model evaluation","text":"

    To do.

    "},{"location":"recipes/on-hoeffding-trees/","title":"Incremental decision trees in river: the Hoeffding Tree case","text":"

    Decision trees (DT) are popular learning models due to their inherent simplicity, flexibility and self-explainable structure. Moreover, when aggregated in ensembles, high predictive power can be achieved. Bagging and gradient boosting-based tree ensembles are very popular solutions in competition platforms such as Kaggle, and also among researchers.

    Although fairly lightweight, traditional batch DTs cannot cope with data stream mining/online learning requirements, as they do multiple passes over the data and have to be retrained from scratch every time a new observation appears.

    The data stream literature has plenty of incremental DT (iDT) families that are better suited to online learning. Among them, Hoeffding Trees (HT) are historically the most popular family of iDTs to date. In fact, HTs have some nice properties:

    • one-pass learning regime;
    • theoretical guarantees to converge to the batch DT model given enough observations and a stationary data distribution;
    • small memory and running time footprint (in most cases);
    • some of their variations can deal with non-stationary distributions.

    And the previous list goes on and on. Besides that, HTs also have the same advantages as batch DTs (C4.5/J48, CART, M5, etc.). We can inspect the structure of a HT to understand how decisions were made, which is a nice feature to have in online learning tasks.

    In River, HTs are first-class citizens, so we have multiple realizations of this framework that are suited to different learning tasks and scenarios.

    This brief introduction to HTs does not aim to be extensive nor to delve into algorithmic or implementation details. Instead, we intend to provide a high-level overview of the HTs as they are envisioned in River, as well as their shared properties and important hyperparameters.

    In this guide, we are going to:

    1. summarize the differences across the multiple HT versions available;
    2. learn how to inspect tree models;
    3. learn how to manage the memory usage of HTs;
    4. compare numerical tree splitters and understand their impact on the iDT induction process.

    Well, without further ado, let's go!

    First things first, we are going to start with some imports.

    import matplotlib.pyplot as plt\nimport datetime as dt\n\nfrom river import datasets\nfrom river import evaluate\nfrom river import metrics\nfrom river import preprocessing  # we are going to use that later\nfrom river.datasets import synth  # we are going to use some synthetic datasets too\nfrom river import tree\n
    "},{"location":"recipes/on-hoeffding-trees/#1-trees-trees-everywhere-gardening-101-with-river","title":"1. Trees, trees everywhere: gardening 101 with river","text":"

    At first glance, the number of iDT algorithms in River might seem too much to handle, but in reality the distinction among them is easy to grasp. To facilitate our lives, here's a neat table listing the available HT models and summarizing their differences:

    Name | Acronym | Task | Non-stationary? | Comments | Source
    Hoeffding Tree Classifier | HTC | Classification | No | Basic HT for classification tasks | [1]
    Hoeffding Adaptive Tree Classifier | HATC | Classification | Yes | Modifies HTC by adding an instance of ADWIN to each node to detect and react to drifts | [2]
    Extremely Fast Decision Tree Classifier | EFDT | Classification | No | Deploys split decisions as soon as possible and periodically revisits decisions, redoing them if necessary. Not as fast in practice as the name implies, but it tends to converge faster than HTC to the model generated by a batch DT | [3]
    Hoeffding Tree Regressor | HTR | Regression | No | Basic HT for regression tasks. It is an adaptation of the FIRT/FIMT algorithm that bears some resemblance to HTC | [4]
    Hoeffding Adaptive Tree Regressor | HATR | Regression | Yes | Modifies HTR by adding an instance of ADWIN to each node to detect and react to drifts | -
    incremental Structured-Output Prediction Tree Regressor | iSOUPT | Multi-target regression | No | Multi-target version of HTR | [5]
    Label Combination Hoeffding Tree Classifier | LCHTC | Multi-label classification | No | Creates a numerical code for each combination of the binary labels and uses HTC to learn from this encoded representation. At prediction time, decodes the modified representation to obtain the original label set | -

As we can see, although their application fields sometimes overlap, each HT variation is best suited to specific situations. Moreover, River provides standardized API access to all the HT variants, since they share many properties.

    "},{"location":"recipes/on-hoeffding-trees/#2-how-to-inspect-tree-models","title":"2. How to inspect tree models?","text":"

    We provide a handful of tools to inspect trained HTs in River. Here, we will provide some examples of how to access their inner structures, get useful information, and plot the iDT structure.

Firstly, let's pick a toy dataset for our tree to learn from. Here we are going to focus on the classification case, but the same operations apply to other learning tasks. We will select the Phishing dataset from the datasets module to exemplify the HTs' capabilities.

    dataset = datasets.Phishing()\ndataset\n
    Phishing websites.\n\nThis dataset contains features from web pages that are classified as phishing or not.\n\n    Name  Phishing                                                          \n    Task  Binary classification                                             \n Samples  1,250                                                             \nFeatures  9                                                                 \n  Sparse  False                                                             \n    Path  /Users/max/projects/online-ml/river/river/datasets/phishing.csv.gz\n

We are going to train an instance of HoeffdingTreeClassifier using this dataset. As with everything else in River, training an iDT is a piece of cake!

    %%time\n\nmodel = tree.HoeffdingTreeClassifier(grace_period=50)\n\nfor x, y in dataset:\n    model.learn_one(x, y)\n\nmodel\n
    CPU times: user 56.8 ms, sys: 984 \u00b5s, total: 57.8 ms\nWall time: 58.7 ms\n
    HoeffdingTreeClassifier
    HoeffdingTreeClassifier ( grace_period=50 max_depth=inf split_criterion=\"info_gain\" delta=1e-07 tau=0.05 leaf_prediction=\"nba\" nb_threshold=0 nominal_attributes=None splitter=GaussianSplitter ( n_splits=10 ) binary_split=False min_branch_fraction=0.01 max_share_to_split=0.99 max_size=100. memory_estimate_period=1000000 stop_mem_management=False remove_poor_attrs=False merit_preprune=True )

That's it! We are not going to go into the details of HTC's available parameters here. The user can refer to the documentation page for more information about that. Let's talk about model inspection :D

    At any time, we can easily get some statistics about our trained model by using the summary property:

    model.summary\n
    {'n_nodes': 5,\n 'n_branches': 2,\n 'n_leaves': 3,\n 'n_active_leaves': 3,\n 'n_inactive_leaves': 0,\n 'height': 3,\n 'total_observed_weight': 1250.0}\n

This property shows us the internal structure of the tree, including data concerning the memory-management routines that we are going to check later in this guide. We can also get a representation of the tree model as a pandas.DataFrame object:

    model.to_dataframe().iloc[:5, :5]\n
      parent  is_leaf  depth  stats                                               feature
node
0     <NA>    False    0      {True: 260.0, False: 390.0}                         empty_server_form_handler
1     0       True     1      {True: 443.4163997711022, False: 59.8769131081...  NaN
2     0       False    1      {True: 71.58360022889781, False: 404.123086891...  popup_window
3     2       True     2      {False: 31.426538522574834, True: 33.0}            NaN
4     2       True     2      {False: 250.57346147742516, True: 6.0}             NaN

Hmm, maybe not the clearest of representations. What about drawing the tree structure instead?

    model.draw()\n

    Much better, huh?

    Lastly, we can check how the tree predicts one specific instance by using the debug_one method:

    x, y = next(iter(dataset))  # Let's select the first example in the stream\nx, y\n
    ({'empty_server_form_handler': 0.0,\n  'popup_window': 0.0,\n  'https': 0.0,\n  'request_from_other_domain': 0.0,\n  'anchor_from_other_domain': 0.0,\n  'is_popular': 0.5,\n  'long_url': 1.0,\n  'age_of_domain': 1,\n  'ip_in_url': 1},\n True)\n
    print(model.debug_one(x))\n
    empty_server_form_handler \u2264 0.5454545454545454\nClass True:\n    P(False) = 0.1\n    P(True) = 0.9\n

Our tree got this one right! The debug_one method is especially useful when dealing with a big tree model, where drawing might not be the wisest choice (we would end up with a chart containing too much information to understand visually).

    Some additional hints:

• the max_depth parameter is our friend when building HTs that need to be constantly inspected. This parameter, which is available for every HT variant, triggers a pre-pruning mechanism that stops tree growth when the given depth is reached.
• we can also limit the depth when using the draw method.
• in the case of tree ensembles, individual trees can be accessed using the [index] operator. Then, the same set of inspection tools is available to play with, as sketched below!
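Here is a minimal sketch combining those hints. We assume the draw method accepts a max_depth argument and that River ensembles support list-like indexing, as described above:

    from river import ensemble, tree

    # Pre-pruning: the tree will never grow past 3 levels
    shallow_tree = tree.HoeffdingTreeClassifier(max_depth=3)

    # Limit the depth of the drawing only (assuming draw's max_depth argument):
    # shallow_tree.draw(max_depth=2)

    # Individual members of an ensemble can be reached by index
    forest = ensemble.BaggingClassifier(
        model=tree.HoeffdingTreeClassifier(),
        n_models=3,
        seed=42,
    )
    print(forest[0].summary)  # the same inspection tools apply to each tree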
    "},{"location":"recipes/on-hoeffding-trees/#3-advanced-gardening-with-river-grab-your-pruning-shears-and-lets-limit-memory-usage","title":"3. Advanced gardening with river: grab your pruning shears and let's limit memory usage","text":"

Online learning is well-suited to highly scalable processing centers with petabytes of data arriving intermittently, but it can also work on Internet of Things (IoT) devices operating at low power and with limited processing capability. Hence, making sure our trees do not use too much memory is a nice feature that can impact both energy usage and running time. HTs have memory-management routines that give the user control over the available computational resources.

In this brief guide, we are going to use a regression tree, since this kind of iDT typically uses more memory than its classification counterparts. Nevertheless, the user can control memory usage in the exact same way in River, regardless of the HT variant!

    We will rely on the Friedman synthetic dataset (data generator) from the synth module in our evaluation. Since data generators can produce instances indefinitely, we will select a sample of size 10K for our tests.

We are almost ready to go. Let's first define a simple function that plots the results obtained from a given dataset, metric, and set of models.

    def plot_performance(dataset, metric, models):\n    metric_name = metric.__class__.__name__\n\n    # To make the generated data reusable\n    dataset = list(dataset)\n    fig, ax = plt.subplots(figsize=(10, 5), nrows=3, dpi=300)\n    for model_name, model in models.items():\n        step = []\n        error = []\n        r_time = []\n        memory = []\n\n        for checkpoint in evaluate.iter_progressive_val_score(\n            dataset, model, metric, measure_time=True, measure_memory=True, step=100\n        ):\n            step.append(checkpoint[\"Step\"])\n            error.append(checkpoint[metric_name].get())\n\n            # Convert timedelta object into seconds\n            r_time.append(checkpoint[\"Time\"].total_seconds())\n            # Make sure the memory measurements are in MB\n            raw_memory = checkpoint[\"Memory\"]\n            memory.append(raw_memory * 2**-20)\n\n        ax[0].plot(step, error, label=model_name)\n        ax[1].plot(step, r_time, label=model_name)\n        ax[2].plot(step, memory, label=model_name)\n\n    ax[0].set_ylabel(metric_name)\n    ax[1].set_ylabel('Time (seconds)')\n    ax[2].set_ylabel('Memory (MB)')\n    ax[2].set_xlabel('Instances')\n\n    ax[0].grid(True)\n    ax[1].grid(True)\n    ax[2].grid(True)\n\n    ax[0].legend(\n        loc='upper center', bbox_to_anchor=(0.5, 1.25),\n        ncol=3, fancybox=True, shadow=True\n    )\n    plt.tight_layout()\n    plt.close()\n\n    return fig\n
    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"Unbounded HTR\": (\n            preprocessing.StandardScaler() |\n            tree.HoeffdingTreeRegressor(splitter=tree.splitter.EBSTSplitter())\n        )\n    }\n)\n

In our example we use the EBSTSplitter, which is going to be discussed later. For now, it is enough to know that it is a mechanism to evaluate split candidates in the trees.

    As we can see, our tree uses almost 10 MB to keep its structure. Let's say we wanted to limit our memory usage to 5 MB. How could we do that?

Note that we are using an illustrative case here. In real applications, data may be unbounded, so the trees might grow indefinitely.

    HTs expose some parameters related to memory management. The user can refer to the documentation for more details on that matter. Here, we are going to focus on two parameters:

• max_size: determines the maximum amount of memory (in MB) that the HT can use.
• memory_estimate_period: the interval, in number of observed instances, between memory-management checks.

    We are going to limit our HTR to 5 MB and perform memory checks at intervals of 500 instances.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"Restricted HTR\": (\n            preprocessing.StandardScaler()\n            | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter(),\n                max_size=5,\n                memory_estimate_period=500\n            )\n        )\n    }\n)\n

Note that as soon as the memory usage reaches the limit we set (at the memory check intervals), HTR starts managing its resource usage to reduce its size. As a consequence, the running time also decreases. For more accurate management, the intervals between memory checks should be decreased. This action, however, has costs, since the tree stops the learning process to estimate its size and alter its own structure. Too frequent memory checks might end up slowing down the learning process. Besides, by using fewer resources, the predictive performance can be negatively impacted. So, use this tool with caution!

But how does that work at all?

HTs monitor the incoming feature values to perform split attempts. To do so, they rely on a class of algorithms called Attribute Observers (AO) or Splitters (spoiler alert!). Each leaf node in an HT keeps one AO per incoming feature. After pre-determined intervals (the grace_period parameter), leaves query their AOs for split candidates. Well, there are costs to monitoring input features (mainly the numerical ones). In fact, AOs are among the most time- and memory-consuming portions of HTs. To manage memory usage, an HT first determines its least promising leaves, w.r.t. how likely they are to be split. Then, these leaves' AOs are removed, and the tree nodes are said to be "deactivated." That's it! Deactivated leaves do not perform split attempts anymore, but they continue to be updated to provide responses. They will be kept as leaves as long as there are no resources available to enable tree growth. These leaves can be activated again (meaning that new AOs will be created for them) if memory becomes available, so don't worry!
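One way to see this mechanism in action is to watch the n_active_leaves and n_inactive_leaves entries of the summary property after training a memory-bounded tree. A small sketch reusing the restricted setup from above:

    from river import preprocessing, tree
    from river.datasets import synth

    model = (
        preprocessing.StandardScaler()
        | tree.HoeffdingTreeRegressor(
            splitter=tree.splitter.EBSTSplitter(),
            max_size=5,
            memory_estimate_period=500,
        )
    )

    for x, y in synth.Friedman(seed=42).take(10_000):
        model.predict_one(x)  # updates the unsupervised scaler
        model.learn_one(x, y)

    # Deactivated leaves show up under 'n_inactive_leaves'
    print(model["HoeffdingTreeRegressor"].summary)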

    Hint: another indirect way to bound memory usage is to limit the tree depth. By default, the trees can grow indefinitely, but the max_depth parameter can control this behavior.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"HTR with at most 5 levels\": (\n            preprocessing.StandardScaler()\n            | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter(),\n                max_depth=5\n            )\n        )\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#4-branching-and-growth-splitters-the-heart-of-the-trees","title":"4. Branching and growth: splitters, the heart of the trees","text":"

As previously stated, one of the core operations of iDTs is, well, to grow. Plant and gardening-related jokes aside, growth in HTs is guided by their AOs or splitters, as mentioned at the end of Section 3.

Nominal features can be easily monitored, since the feature partitions are well-defined beforehand. Numerical features, on the other hand, do not have an explicit best cut point. Still, numerical features are typically split using a binary test: \(\le\) or \(>\). Therefore, numerical splitters must somehow summarize the incoming feature values and be able to evaluate the merit of split point candidates.
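In other words, each numerical branch boils down to a predicate like the one below (a toy illustration, not River's internal representation):

    def route(x: dict, feature: str, threshold: float) -> str:
        """Send a sample to the left child if the tested value is <= threshold."""
        return "left" if x[feature] <= threshold else "right"

    route({"age_of_domain": 1.0}, "age_of_domain", 0.5)  # -> 'right'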

There are diverse strategies to monitor numerical features, and several choices related to them, including which data structure is used to summarize the incoming feature and how many split points are evaluated during split attempts. Again, this guide does not intend to be an exhaustive dive into the iDT subject. In fact, each of the following aspects of iDTs could be considered a separate research area: AOs, intervals between split attempts, split heuristics (e.g., info gain, variance reduction, and so on), tree depth and max size, and much more!

Let's focus a bit on the AO matter. River provides a handful of splitters for classification and regression trees, which can be chosen using the splitter parameter. We will list the available tree splitters in the following sections and compare some of their characteristics.

    Some notation:

    • \\(n\\): Number of observations seen so far.
    • \\(c\\): the number of classes.
    • \\(s\\): the number of split points to evaluate (which means that this is a user-given parameter).
    • \\(h\\): the number of histogram bins or hash slots. Tipically, \\(h \\ll n\\).
    "},{"location":"recipes/on-hoeffding-trees/#41-classification-tree-splitters","title":"4.1. Classification tree splitters","text":"

    The following table summarizes the available classification splitters. The user might refer to the documentation of each splitter for more details about their functioning.

| Splitter | Description | Insertion | Memory | Split candidate query | Works with Naive Bayes leaves? |
|----------|-------------|-----------|--------|-----------------------|--------------------------------|
| Exhaustive | Keeps all the observed input values and class counts in a Binary Search Tree (BST) | \(O(\log n)\) (average) or \(O(n)\) (worst case) | \(O(n)\) | \(O(n)\) | No |
| Histogram | Builds a histogram for each class in order to discretize the input feature | \(O(\log h)\) | \(O(ch)\) | \(O(ch)\) | Yes |
| Gaussian | Approximates the class distributions using Gaussian distributions | \(O(1)\) | \(O(c)\) | \(O(cs)\) | Yes |

Note that some of the splitters have configurable parameters that directly impact not only their time and memory costs, but also the final predictive performance. Examples:

• The number of split points can be configured in the Gaussian splitter. Increasing this number makes the splitter slower, but it also potentially increases the quality of the obtained query points, implying enhanced tree accuracy.
• The number of stored bins can be selected in the Histogram splitter. Increasing this number increases the memory footprint and running time of the splitter, but it also potentially makes its split candidates more accurate, positively impacting the tree's final predictive performance. Both knobs are illustrated in the sketch after this list.
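As a sketch, both knobs can be set when instantiating the splitters. The n_splits name matches the Gaussian splitter representation shown earlier; n_bins for the Histogram splitter is an assumption based on the current River API:

    from river import tree

    # Evaluate more split point candidates with the Gaussian splitter
    # (the default, seen in the model representation above, is n_splits=10)
    finer_gaussian = tree.HoeffdingTreeClassifier(
        splitter=tree.splitter.GaussianSplitter(n_splits=30)
    )

    # Store more bins per class with the Histogram splitter
    # (assuming the n_bins parameter name)
    finer_histogram = tree.HoeffdingTreeClassifier(
        splitter=tree.splitter.HistogramSplitter(n_bins=512)
    )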

Next, we provide a brief comparison of the classification splitters using 10K instances of the Random RBF synthetic dataset. Note that the tree equipped with the Exhaustive splitter does not use Naive Bayes leaves.

    plot_performance(\n    synth.RandomRBF(seed_model=7, seed_sample=42).take(10_000),\n    metrics.Accuracy(),\n    {\n        \"HTC + Exhaustive splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.ExhaustiveSplitter(),\n            leaf_prediction=\"mc\"\n        ),\n        \"HTC + Histogram splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.HistogramSplitter()\n        ),\n        \"HTC + Gaussian splitter\": tree.HoeffdingTreeClassifier(\n            splitter=tree.splitter.GaussianSplitter()\n        )\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#42-regression-tree-splitters","title":"4.2 Regression tree splitters","text":"

    The available regression tree splitters are summarized in the next table. The TE-BST costs are expressed in terms of \\(n^*\\) because the number of stored elements can be smaller than or equal to \\(n\\).

| Splitter | Description | Insertion | Memory | Split candidate query |
|----------|-------------|-----------|--------|-----------------------|
| Extended Binary Search Tree (E-BST) | Stores all the observations and target statistics in a BST | \(O(\log n)\) (average) or \(O(n)\) (worst case) | \(O(n)\) | \(O(n)\) |
| Truncated E-BST (TE-BST) | Rounds the incoming data before passing it to the BST | \(O(\log n^*)\) (average) or \(O(n^*)\) (worst case) | \(O(n^*)\) | \(O(n^*)\) |
| Quantization Observer (QO) | Uses a hash-like structure to quantize the incoming data | \(O(1)\) | \(O(h)\) | \(O(h \log h)\) |

E-BST is an exhaustive algorithm, i.e., it works as batch solutions usually do, which might be prohibitive in real-world online scenarios. TE-BST and QO apply approximations to alleviate the costs of monitoring numerical data and performing split attempts. The number of decimal places used to round the data (TE-BST) and the quantization radius (QO) are directly related to the running time, memory footprint, and error of the resulting tree model.
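As a sketch, both approximation knobs are exposed as constructor parameters; the digits and radius parameter names are assumptions based on the current River API:

    from river import tree

    # TE-BST: fewer decimal places mean coarser summaries and lower memory usage
    te_bst = tree.splitter.TEBSTSplitter(digits=1)

    # QO: a larger quantization radius means coarser quantization and lower memory usage
    qo = tree.splitter.QOSplitter(radius=0.5)

    model = tree.HoeffdingTreeRegressor(splitter=qo)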

We present a brief comparison of the available regression tree splitters using 10K instances of the Friedman synthetic dataset.

    plot_performance(\n    synth.Friedman(seed=42).take(10_000),\n    metrics.MAE(),\n    {\n        \"HTR + E-BST\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.EBSTSplitter()\n            )\n        ),\n        \"HTR + TE-BST\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.TEBSTSplitter()\n            )\n        ),\n        \"HTR + QO\": (\n            preprocessing.StandardScaler() | tree.HoeffdingTreeRegressor(\n                splitter=tree.splitter.QOSplitter()\n            )\n        ),\n\n    }\n)\n

    "},{"location":"recipes/on-hoeffding-trees/#wrapping-up","title":"Wrapping up","text":"

This guide provided a walkthrough of the HTs available in River. We discussed model inspection, memory management, and feature splits. Keep in mind that each HT variant has specific details and capabilities that are out of the scope of this introductory material. The user is advised to check the documentation page of the tree models for detailed information.

    "},{"location":"recipes/pipelines/","title":"Pipelines","text":"

Pipelines are an integral part of River. We encourage their usage and apply them in many of our examples.

    The compose.Pipeline contains all the logic for building and applying pipelines. A pipeline is essentially a list of estimators that are applied in sequence. The only requirement is that the first n - 1 steps be transformers. The last step can be a regressor, a classifier, a clusterer, a transformer, etc. Here is an example:

    from river import compose\nfrom river import linear_model\nfrom river import preprocessing\nfrom river import feature_extraction\n\nmodel = compose.Pipeline(\n    preprocessing.StandardScaler(),\n    feature_extraction.PolynomialExtender(),\n    linear_model.LinearRegression()\n)\n

    You can also use the | operator, as so:

    model = (\n    preprocessing.StandardScaler() |\n    feature_extraction.PolynomialExtender() |\n    linear_model.LinearRegression()\n)\n

    Or, equally:

    model = preprocessing.StandardScaler() \nmodel |= feature_extraction.PolynomialExtender()\nmodel |= linear_model.LinearRegression()\n

    A pipeline has a draw method that can be used to visualize it:

    model\n
    StandardScaler
    StandardScaler ( with_std=True )
    PolynomialExtender
    PolynomialExtender ( degree=2 interaction_only=False include_bias=False bias_name=\"bias\" )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

compose.Pipeline inherits from base.Estimator, which means that it has a learn_one method. You would expect learn_one to update each estimator, but that's not actually what happens. Instead, the transformers are updated when predict_one (or predict_proba_one for that matter) is called. Indeed, in online machine learning, we can update the unsupervised parts of our model as soon as a sample arrives. We don't have to wait for the ground truth to arrive in order to update unsupervised estimators that don't depend on it. In other words, in a pipeline, learn_one updates the supervised parts, whilst predict_one updates the unsupervised parts. It's important to be aware of this behavior, as it is quite different from what is done in other libraries that rely on batch machine learning.

    Here is a small example to illustrate the previous point:

    from river import datasets\n\ndataset = datasets.TrumpApproval()\nx, y = next(iter(dataset))\nx, y\n
    ({'ordinal_date': 736389,\n  'gallup': 43.843213,\n  'ipsos': 46.19925042857143,\n  'morning_consult': 48.318749,\n  'rasmussen': 44.104692,\n  'you_gov': 43.636914000000004},\n 43.75505)\n

    Let us call predict_one, which will update each transformer, but won't update the linear regression.

    model.predict_one(x)\n
    0.0\n

    The prediction is nil because each weight of the linear regression is equal to 0.

    model['StandardScaler'].means\n
    defaultdict(float,\n            {'ordinal_date': 0.0,\n             'gallup': 0.0,\n             'ipsos': 0.0,\n             'morning_consult': 0.0,\n             'rasmussen': 0.0,\n             'you_gov': 0.0})\n

    As we can see, the means of each feature have been updated, even though we called predict_one and not learn_one.

Note that if you call transform_one with a pipeline whose last step is not a transformer, then the output from the last transformer (which is thus the penultimate step) will be returned:

    model.transform_one(x)\n
    {'ordinal_date': 0.0,\n 'gallup': 0.0,\n 'ipsos': 0.0,\n 'morning_consult': 0.0,\n 'rasmussen': 0.0,\n 'you_gov': 0.0,\n 'ordinal_date*ordinal_date': 0.0,\n 'gallup*ordinal_date': 0.0,\n 'ipsos*ordinal_date': 0.0,\n 'morning_consult*ordinal_date': 0.0,\n 'ordinal_date*rasmussen': 0.0,\n 'ordinal_date*you_gov': 0.0,\n 'gallup*gallup': 0.0,\n 'gallup*ipsos': 0.0,\n 'gallup*morning_consult': 0.0,\n 'gallup*rasmussen': 0.0,\n 'gallup*you_gov': 0.0,\n 'ipsos*ipsos': 0.0,\n 'ipsos*morning_consult': 0.0,\n 'ipsos*rasmussen': 0.0,\n 'ipsos*you_gov': 0.0,\n 'morning_consult*morning_consult': 0.0,\n 'morning_consult*rasmussen': 0.0,\n 'morning_consult*you_gov': 0.0,\n 'rasmussen*rasmussen': 0.0,\n 'rasmussen*you_gov': 0.0,\n 'you_gov*you_gov': 0.0}\n

In many cases, you might want to connect a step to multiple steps. For instance, you might want to extract different kinds of features from a single input. An elegant way to do this is to use a compose.TransformerUnion. Essentially, the latter is a list of transformers whose results will be merged into a single dict when transform_one is called. As an example, let's say that we want to apply a feature_extraction.RBFSampler as well as the feature_extraction.PolynomialExtender. This may be done as so:

    model = (\n    preprocessing.StandardScaler() |\n    (feature_extraction.PolynomialExtender() + feature_extraction.RBFSampler()) |\n    linear_model.LinearRegression()\n)\n\nmodel\n
    StandardScaler
    StandardScaler ( with_std=True )
    PolynomialExtender
    PolynomialExtender ( degree=2 interaction_only=False include_bias=False bias_name=\"bias\" )
    RBFSampler
    RBFSampler ( gamma=1. n_components=100 seed=None )
    LinearRegression
    LinearRegression ( optimizer=SGD ( lr=Constant ( learning_rate=0.01 ) ) loss=Squared () l2=0. l1=0. intercept_init=0. intercept_lr=Constant ( learning_rate=0.01 ) clip_gradient=1e+12 initializer=Zeros () )

    Note that the + symbol acts as a shorthand notation for creating a compose.TransformerUnion, which means that we could have declared the above pipeline as so:

    model = (\n    preprocessing.StandardScaler() |\n    compose.TransformerUnion(\n        feature_extraction.PolynomialExtender(),\n        feature_extraction.RBFSampler()\n    ) |\n    linear_model.LinearRegression()\n)\n

Pipelines provide the benefit of removing a lot of cruft by taking care of tedious details for you. They also make it possible to clearly define what steps your model is made of. Finally, having your model in a single object means that you can move it around more easily. Note that you can include user-defined functions in a pipeline by using a compose.FuncTransformer.
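To illustrate that last point, here is a minimal sketch that wraps a user-defined function with compose.FuncTransformer. The get_hour function is made up for the example, and the feature names match the Bikes dataset used in the next section:

    from river import compose, linear_model, preprocessing

    def get_hour(x):
        # A hypothetical feature extractor: pull the hour out of a datetime field
        x["hour"] = x["moment"].hour
        return x

    model = (
        compose.FuncTransformer(get_hour) |
        compose.Select("hour", "temperature") |
        preprocessing.StandardScaler() |
        linear_model.LinearRegression()
    )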

    "},{"location":"recipes/reading-data/","title":"Reading data","text":"

    In River, the features of a sample are stored inside a dictionary, which in Python is called a dict and is a native data structure. In other words, we don't use any sophisticated data structure, such as a numpy.ndarray or a pandas.DataFrame.

    The main advantage of using plain dicts is that it removes the overhead that comes with using the aforementioned data structures. This is important in a streaming context because we want to be able to process many individual samples in rapid succession. Another advantage is that dicts allow us to give names to our features. Finally, dicts are not typed, and can therefore store heterogeneous data.

    Another advantage which we haven't mentioned is that dicts play nicely with Python's standard library. Indeed, Python contains many tools that allow manipulating dicts. For instance, the csv.DictReader can be used to read a CSV file and convert each row to a dict. In fact, the stream.iter_csv method from River is just a wrapper on top of csv.DictReader that adds a few bells and whistles.
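For instance, reading rows as dicts with the standard library alone looks like this (a tiny sketch with a hypothetical file path):

    import csv

    with open("some_data.csv") as f:  # hypothetical path
        for row in csv.DictReader(f):
            # each row is a dict mapping column names to string values
            print(row)
            break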

    River provides some out-of-the-box datasets to get you started.

    from river import datasets\n\ndataset = datasets.Bikes()\ndataset\n
    Bike sharing station information from the city of Toulouse.\n\nThe goal is to predict the number of bikes in 5 different bike stations from the city of\nToulouse.\n\n      Name  Bikes                                                         \n      Task  Regression                                                    \n   Samples  182,470                                                       \n  Features  8                                                             \n    Sparse  False                                                         \n      Path  /Users/max/river_data/Bikes/toulouse_bikes.csv                \n       URL  https://maxhalford.github.io/files/datasets/toulouse_bikes.zip\n      Size  12.52 MB                                                      \nDownloaded  True\n

Note that when we say "loaded", we don't mean that the actual data is read from the disk. On the contrary, the dataset is a stream whose samples can be iterated over one at a time. In Python lingo, it's a generator.

    Let's take a look at the first sample:

    x, y = next(iter(dataset))\nx\n
    {'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'station': 'metro-canal-du-midi',\n 'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'pressure': 1017.0,\n 'temperature': 6.54,\n 'wind': 9.3}\n

    Each dataset is iterable, which means we can also do:

    for x, y in dataset:\n    break\nx\n
    {'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n 'station': 'metro-canal-du-midi',\n 'clouds': 75,\n 'description': 'light rain',\n 'humidity': 81,\n 'pressure': 1017.0,\n 'temperature': 6.54,\n 'wind': 9.3}\n

    As we can see, the values have different types.

    Under the hood, calling for x, y in dataset simply iterates over a file and parses each value appropriately. We can do this ourselves by using stream.iter_csv:

    from river import stream\n\nX_y = stream.iter_csv(dataset.path)\nx, y = next(X_y)\nx, y\n
    ({'moment': '2016-04-01 00:00:07',\n  'bikes': '1',\n  'station': 'metro-canal-du-midi',\n  'clouds': '75',\n  'description': 'light rain',\n  'humidity': '81',\n  'pressure': '1017.0',\n  'temperature': '6.54',\n  'wind': '9.3'},\n None)\n

There are a couple of things wrong here. First of all, the numeric features have not been cast into numbers. Indeed, by default, stream.iter_csv assumes that everything is a string. A related issue is that the moment field hasn't been parsed into a datetime. Finally, the target field, which is bikes, hasn't been separated from the rest of the features. We can remedy these issues by setting a few parameters:

    X_y = stream.iter_csv(\n    dataset.path,\n    converters={\n        'bikes': int,\n        'clouds': int,\n        'humidity': int,\n        'pressure': float,\n        'temperature': float,\n        'wind': float\n    },\n    parse_dates={'moment': '%Y-%m-%d %H:%M:%S'},\n    target='bikes'\n)\nx, y = next(X_y)\nx, y\n
    ({'moment': datetime.datetime(2016, 4, 1, 0, 0, 7),\n  'station': 'metro-canal-du-midi',\n  'clouds': 75,\n  'description': 'light rain',\n  'humidity': 81,\n  'pressure': 1017.0,\n  'temperature': 6.54,\n  'wind': 9.3},\n 1)\n

That's much better. We invite you to take a look at the stream module to see for yourself what other methods are available. Note that River is first and foremost a machine learning library, and is therefore less concerned with reading data than with statistical algorithms. We do however believe that the fact that we use dictionaries gives you, the user, a lot of freedom and flexibility.

    The stream module provides helper functions to read data from different formats. For instance, you can use the stream.iter_sklearn_dataset function to turn any scikit-learn dataset into a stream.

    from sklearn import datasets\n\ndataset = datasets.load_diabetes()\n\nfor x, y in stream.iter_sklearn_dataset(dataset):\n    break\n\nx, y\n
    ({'age': 0.0380759064334241,\n  'sex': 0.0506801187398187,\n  'bmi': 0.0616962065186885,\n  'bp': 0.0218723549949558,\n  's1': -0.0442234984244464,\n  's2': -0.0348207628376986,\n  's3': -0.0434008456520269,\n  's4': -0.00259226199818282,\n  's5': 0.0199084208763183,\n  's6': -0.0176461251598052},\n 151.0)\n

To conclude, let us briefly mention the difference between proactive learning and reactive learning in the specific context of online machine learning. When we loop over data with a for loop, we have control over the data and the order in which it arrives. We are proactive in the sense that we, the user, are asking for the data to arrive.

In contrast, in a reactive situation, we don't have control over the data arrival. A typical example of such a situation is a web server, where web requests arrive in an arbitrary order. This is a situation where River shines. For instance, in a Flask application, you could define a route to make predictions with a River model as so:

    import flask\n\napp = flask.Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef predict():\n    payload = flask.request.json\n    river_model = load_model()\n    return river_model.predict_proba_one(payload)\n

    Likewise, a model can be updated whenever a request arrives as so:

    @app.route('/', methods=['POST'])\ndef learn():\n    payload = flask.request.json\n    river_model = load_model()\n    river_model.learn_one(payload['features'], payload['target'])\n    return {}, 201\n

    To summarize, River can be used in many different ways. The fact that it uses dictionaries to represent features provides a lot of flexibility and space for creativity.

    "},{"location":"recipes/rolling-computations/","title":"Rolling computations","text":"

    You might wonder which classes in River can be wrapped with a utils.Rolling. This can be answered with a bit of metaprogramming.

    import importlib\nimport inspect\nfrom river.utils.rolling import Rollable\n\nfor submodule in importlib.import_module(\"river.api\").__all__:\n    for _, obj in inspect.getmembers(\n        importlib.import_module(f\"river.{submodule}\"), lambda x: isinstance(x, Rollable)\n    ):\n        print(f'{submodule}.{obj.__name__}')\n
    [covariance.EmpiricalCovariance](../../api/covariance/EmpiricalCovariance)\n[metrics.Accuracy](../../api/metrics/Accuracy)\n[metrics.AdjustedMutualInfo](../../api/metrics/AdjustedMutualInfo)\n[metrics.AdjustedRand](../../api/metrics/AdjustedRand)\n[metrics.BalancedAccuracy](../../api/metrics/BalancedAccuracy)\n[metrics.ClassificationReport](../../api/metrics/ClassificationReport)\n[metrics.CohenKappa](../../api/metrics/CohenKappa)\n[metrics.Completeness](../../api/metrics/Completeness)\n[metrics.ConfusionMatrix](../../api/metrics/ConfusionMatrix)\n[metrics.CrossEntropy](../../api/metrics/CrossEntropy)\n[metrics.F1](../../api/metrics/F1)\n[metrics.FBeta](../../api/metrics/FBeta)\n[metrics.FowlkesMallows](../../api/metrics/FowlkesMallows)\n[metrics.GeometricMean](../../api/metrics/GeometricMean)\n[metrics.Homogeneity](../../api/metrics/Homogeneity)\n[metrics.Jaccard](../../api/metrics/Jaccard)\n[metrics.LogLoss](../../api/metrics/LogLoss)\n[metrics.MAE](../../api/metrics/MAE)\n[metrics.MAPE](../../api/metrics/MAPE)\n[metrics.MCC](../../api/metrics/MCC)\n[metrics.MSE](../../api/metrics/MSE)\n[metrics.MacroF1](../../api/metrics/MacroF1)\n[metrics.MacroFBeta](../../api/metrics/MacroFBeta)\n[metrics.MacroJaccard](../../api/metrics/MacroJaccard)\n[metrics.MacroPrecision](../../api/metrics/MacroPrecision)\n[metrics.MacroRecall](../../api/metrics/MacroRecall)\n[metrics.MicroF1](../../api/metrics/MicroF1)\n[metrics.MicroFBeta](../../api/metrics/MicroFBeta)\n[metrics.MicroJaccard](../../api/metrics/MicroJaccard)\n[metrics.MicroPrecision](../../api/metrics/MicroPrecision)\n[metrics.MicroRecall](../../api/metrics/MicroRecall)\n[metrics.MultiFBeta](../../api/metrics/MultiFBeta)\n[metrics.MutualInfo](../../api/metrics/MutualInfo)\n[metrics.NormalizedMutualInfo](../../api/metrics/NormalizedMutualInfo)\n[metrics.Precision](../../api/metrics/Precision)\n[metrics.R2](../../api/metrics/R2)\n[metrics.RMSE](../../api/metrics/RMSE)\n[metrics.RMSLE](../../api/metrics/RMSLE)\n[metrics.ROCAUC](../../api/metrics/ROCAUC)\n[metrics.Rand](../../api/metrics/Rand)\n[metrics.Recall](../../api/metrics/Recall)\n[metrics.RollingROCAUC](../../api/metrics/RollingROCAUC)\n[metrics.SMAPE](../../api/metrics/SMAPE)\n[metrics.Silhouette](../../api/metrics/Silhouette)\n[metrics.VBeta](../../api/metrics/VBeta)\n[metrics.WeightedF1](../../api/metrics/WeightedF1)\n[metrics.WeightedFBeta](../../api/metrics/WeightedFBeta)\n[metrics.WeightedJaccard](../../api/metrics/WeightedJaccard)\n[metrics.WeightedPrecision](../../api/metrics/WeightedPrecision)\n[metrics.WeightedRecall](../../api/metrics/WeightedRecall)\n[proba.Beta](../../api/proba/Beta)\n[proba.Gaussian](../../api/proba/Gaussian)\n[proba.Multinomial](../../api/proba/Multinomial)\n[proba.MultivariateGaussian](../../api/proba/MultivariateGaussian)\n[stats.BayesianMean](../../api/stats/BayesianMean)\n[stats.Cov](../../api/stats/Cov)\n[stats.Mean](../../api/stats/Mean)\n[stats.PearsonCorr](../../api/stats/PearsonCorr)\n[stats.SEM](../../api/stats/SEM)\n[stats.Sum](../../api/stats/Sum)\n[stats.Var](../../api/stats/Var)\n
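For reference, here is a minimal sketch of what wrapping one of these classes looks like, using utils.Rolling around stats.Mean:

    from river import stats, utils

    # A mean maintained over a sliding window of the 3 most recent values
    rolling_mean = utils.Rolling(stats.Mean(), window_size=3)

    for x in [1, 2, 3, 4, 5]:
        rolling_mean.update(x)

    print(rolling_mean.get())  # mean of the last 3 values: (3 + 4 + 5) / 3 = 4.0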
    "},{"location":"releases/0.0.2/","title":"0.0.2 - 2019-02-13","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.0.2/#compat","title":"compat","text":"
    • Added sklearn wrappers.
    "},{"location":"releases/0.0.2/#ensemble","title":"ensemble","text":"
    • Added ensemble.HedgeClassifier.
    "},{"location":"releases/0.0.2/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.RandomDiscarder.
    "},{"location":"releases/0.0.2/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.TargetEncoder.
    "},{"location":"releases/0.0.2/#impute","title":"impute","text":"
    • Added impute.NumericImputer.
    "},{"location":"releases/0.0.2/#optim","title":"optim","text":"
    • Added optim.AbsoluteLoss.
    • Added optim.HingeLoss.
    • Added optim.EpsilonInsensitiveHingeLoss.
    "},{"location":"releases/0.0.2/#stats","title":"stats","text":"
    • Added stats.NUnique.
    • Added stats.Min.
    • Added stats.Max.
    • Added stats.PeakToPeak.
    • Added stats.Kurtosis.
    • Added stats.Skew.
    • Added stats.Sum.
    • Added stats.EWMean.
• Made sure the running statistics produce the same results as the pandas.DataFrame.rolling method.
    "},{"location":"releases/0.0.3/","title":"0.0.3 - 2019-03-21","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.0.3/#base","title":"base","text":"
    • Calling fit_one now returns the calling instance, not the out-of-fold prediction/transform; fit_predict_one, fit_predict_proba_one, and fit_transform_one are available to reproduce the previous behavior.
    • Binary classifiers now output a dict with probabilities for False and True when calling predict_proba_one, which solves the interface issues of having multi-class classifiers do binary classification.
    "},{"location":"releases/0.0.3/#compat","title":"compat","text":"
    • Added compat.convert_river_to_sklearn.
    "},{"location":"releases/0.0.3/#compose","title":"compose","text":"
    • Added compose.BoxCoxTransformRegressor.
    • Added compose.TargetModifierRegressor.
    "},{"location":"releases/0.0.3/#datasets","title":"datasets","text":"
    • Added datasets.fetch_restaurants.
    • Added datasets.load_airline.
    "},{"location":"releases/0.0.3/#dist","title":"dist","text":"
    • Added dist.Multinomial.
    • Added dist.Normal.
    "},{"location":"releases/0.0.3/#ensemble","title":"ensemble","text":"
    • Added ensemble.BaggingRegressor.
    "},{"location":"releases/0.0.3/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.TargetGroupBy.
    "},{"location":"releases/0.0.3/#impute","title":"impute","text":"
    • Added impute.CategoricalImputer.
    "},{"location":"releases/0.0.3/#linear_model","title":"linear_model","text":"
    • Added linear_model.FMRegressor.
    • Removed all the passive-aggressive estimators.
    "},{"location":"releases/0.0.3/#metrics","title":"metrics","text":"
    • Added metrics.Accuracy.
    • Added metrics.MAE.
    • Added metrics.MSE.
    • Added metrics.RMSE.
    • Added metrics.RMSLE.
    • Added metrics.SMAPE.
    • Added metrics.Precision.
    • Added metrics.Recall.
    • Added metrics.F1.
    "},{"location":"releases/0.0.3/#model_selection","title":"model_selection","text":"
    • model_selection.online_score can now be passed a metrics.Metric instead of an sklearn metric; it also checks that the provided metric can be used with the accompanying model.
    "},{"location":"releases/0.0.3/#naive_bayes","title":"naive_bayes","text":"
    • Added naive_bayes.GaussianNB.
    "},{"location":"releases/0.0.3/#optim","title":"optim","text":"
    • Added optim.PassiveAggressiveI.
    • Added optim.PassiveAggressiveII.
    "},{"location":"releases/0.0.3/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.Discarder.
    • Added preprocessing.PolynomialExtender.
    • Added preprocessing.FuncTransformer.
    "},{"location":"releases/0.0.3/#reco","title":"reco","text":"
    • Added reco.SVD.
    "},{"location":"releases/0.0.3/#stats","title":"stats","text":"
    • Added stats.Mode.
    • Added stats.Quantile.
    • Added stats.RollingQuantile.
    • Added stats.Entropy.
    • Added stats.RollingMin.
    • Added stats.RollingMax.
    • Added stats.RollingMode.
    • Added stats.RollingSum.
    • Added stats.RollingPeakToPeak.
    "},{"location":"releases/0.0.3/#stream","title":"stream","text":"
    • Added stream.iter_csv.
    "},{"location":"releases/0.0.3/#tree","title":"tree","text":"
    • Added tree.MondrianTreeClassifier.
    • Added tree.MondrianTreeRegressor.
    "},{"location":"releases/0.1.0/","title":"0.1.0 - 2019-05-08","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.1.0/#base","title":"base","text":"
    • Removed the fit_predict_one estimator method.
    • Removed the fit_predict_proba_one estimator method.
    • Removed the fit_transform_one estimator method.
    "},{"location":"releases/0.1.0/#compat","title":"compat","text":"
    • Added compat.convert_sklearn_to_river.
    • compat.convert_river_to_sklearn now returns an sklearn.pipeline.Pipeline when provided with a compose.Pipeline.
    "},{"location":"releases/0.1.0/#compose","title":"compose","text":"
    • Added compose.Discard.
    • Added compose.Select.
    • Added compose.SplitRegressor.
    • The draw method of compose.Pipeline now works properly for arbitrary amounts of nesting, including multiple nested compose.FeatureUnion.
    "},{"location":"releases/0.1.0/#datasets","title":"datasets","text":"
    • Added datasets.fetch_electricity.
    "},{"location":"releases/0.1.0/#dummy","title":"dummy","text":"
    • Added dummy.NoChangeClassifier.
    • Added dummy.PriorClassifier.
    • Added dummy.StatisticRegressor.
    "},{"location":"releases/0.1.0/#feature_extraction","title":"feature_extraction","text":"
    • Added feature_extraction.Differ.
    • Renamed feature_extraction.GroupBy to feature_extraction.Agg.
    • Renamed feature_extraction.TargetGroupBy to feature_extraction.TargetAgg.
    "},{"location":"releases/0.1.0/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.SelectKBest.
    • Added feature_selection.VarianceThreshold.
    "},{"location":"releases/0.1.0/#impute","title":"impute","text":"
    • Added impute.StatImputer.
    • Removed impute.CategoricalImputer.
    • Removed impute.NumericImputer.
    "},{"location":"releases/0.1.0/#linear_model","title":"linear_model","text":"
    • Added linear_model.PAClassifier.
    • Added linear_model.PARegressor.
    • Added linear_model.SoftmaxRegression.
    "},{"location":"releases/0.1.0/#metrics","title":"metrics","text":"
    • Added metrics.ConfusionMatrix.
    • Added metrics.CrossEntropy.
    • Added metrics.MacroF1.
    • Added metrics.MacroPrecision.
    • Added metrics.MacroRecall.
    • Added metrics.MicroF1.
    • Added metrics.MicroPrecision.
    • Added metrics.MicroRecall.
    • Each metric now has a bigger_is_better property to indicate if a high value is better than a low one or not.
    "},{"location":"releases/0.1.0/#optim","title":"optim","text":"
    • Added optim.OptimalLR.
    • Added optim.CrossEntropy.
    • Removed optim.PassiveAggressiveI.
    • Removed optim.PassiveAggressiveII.
    "},{"location":"releases/0.1.0/#preprocessing","title":"preprocessing","text":"
    • Removed preprocessing.Discarder.
    • Added on and sparse parameters to preprocessing.OneHotEncoder.
    "},{"location":"releases/0.1.0/#stats","title":"stats","text":"
    • Added stats.Covariance.
    • Added stats.PearsonCorrelation.
    • Added stats.SmoothMean.
    "},{"location":"releases/0.1.0/#utils","title":"utils","text":"
    • Added utils.check_estimator.
    • Added utils.Histogram.
    • Added utils.SortedWindow.
    • Added utils.Window.
    "},{"location":"releases/0.10.0/","title":"0.10.0 - 2022-02-04","text":""},{"location":"releases/0.10.0/#base","title":"base","text":"
    • Introduce base.MiniBatchTransformer. Add support for mini-batches to compose.TransformerUnion, compose.Select, and preprocessing.OneHotEncoder.
    "},{"location":"releases/0.10.0/#checks","title":"checks","text":"
    • Created this module to store estimator unit testing, rather than having it in the utils module.
    "},{"location":"releases/0.10.0/#compose","title":"compose","text":"
    • Split compose.Renamer into compose.Prefixer and compose.Suffixer that respectively prepend and append a string to the features' name.
    • Changed compose.Renamer to allow feature renaming following a mapping.
    "},{"location":"releases/0.10.0/#evaluate","title":"evaluate","text":"
• Refactored evaluate.progressive_validation to work with anomaly.base.AnomalyDetectors.
    "},{"location":"releases/0.10.0/#facto","title":"facto","text":"
    • Added debug_one method to BaseFM.
    "},{"location":"releases/0.10.0/#feature_extraction","title":"feature_extraction","text":"
• Made the by parameter in feature_extraction.Agg and feature_extraction.TargetAgg optional, allowing aggregates to be calculated over the whole data.
    • Removed feature_extraction.Lagger and feature_extraction.TargetLagger. Their functionality can be reproduced by combining feature_extraction.Agg and stats.Shift.
• feature_extraction.Agg and feature_extraction.TargetAgg now have a state property. It returns a pandas.Series representing the current aggregate values within each group.
    "},{"location":"releases/0.10.0/#metrics","title":"metrics","text":"
• metrics.ROCAUC works with base.AnomalyDetectors.
    "},{"location":"releases/0.10.0/#misc","title":"misc","text":"
    • Created this module to store some stuff that was in the utils module but wasn't necessarily shared between modules.
    • Implement misc.CovMatrix.
    "},{"location":"releases/0.10.0/#reco","title":"reco","text":"
    • Renamed the Recommender base class into Ranker.
    • Added a rank method to each recommender.
    • Removed reco.SurpriseWrapper as it wasn't really useful.
    • Added an is_contextual property to each ranker to indicate if a model makes use of contextual features or not.
    "},{"location":"releases/0.10.0/#stats","title":"stats","text":"
    • stats.Mean, stats.Var, and stats.Cov each now have an update_many method which accepts numpy arrays.
    "},{"location":"releases/0.10.0/#utils","title":"utils","text":"
    • Removed utils.Window and use collections.deque instead where necessary.
    "},{"location":"releases/0.10.1/","title":"0.10.1 - 2022-02-05","text":""},{"location":"releases/0.10.1/#evaluate","title":"evaluate","text":"

    evaluate.progressive_val_score can now handle models which use **kwargs in their learn_one and predict_one methods. For instance, this is useful for reco.Ranker models which require passing a user and an item.

    "},{"location":"releases/0.11.0/","title":"0.11.0 - 2022-05-28","text":"
    • Moved all metrics in metrics.cluster except metrics.Silhouette to river-extra.
    "},{"location":"releases/0.11.0/#anomaly","title":"anomaly","text":"
• There is now an anomaly.base.SupervisedAnomalyDetector base class for supervised anomaly detection.
• Added anomaly.GaussianScorer, which is the first supervised anomaly detector.
• There is now an anomaly.base.AnomalyFilter base class for anomaly filtering methods. These allow classifying anomaly scores. They can also prevent models from learning on anomalous data, for instance by putting them as an initial step of a pipeline.
• Added anomaly.ConstantFilter and anomaly.QuantileFilter, which are the first anomaly filters.
    • Removed anomaly.ConstantThresholder and anomaly.QuantileThresholder, as they overlap with the new anomaly filtering mechanism.
    "},{"location":"releases/0.11.0/#base","title":"base","text":"
    • Fixed an issue where the _raw_memory_usage property would spin into an infinite loop if a model's property was an itertools.count.
    "},{"location":"releases/0.11.0/#dataset","title":"dataset","text":"
    • Added the datasets.WaterFlow dataset.
    "},{"location":"releases/0.11.0/#dist","title":"dist","text":"
    • A revert method has been added to stats.Gaussian.
    • A revert method has been added to stats.Multinomial.
    • Added dist.TimeRolling to measure probability distributions over windows of time.
    "},{"location":"releases/0.11.0/#drift","title":"drift","text":"
• Added the PeriodicTrigger detector, a baseline capable of producing drift signals at regular or random intervals.
• The numpy usage was removed in drift.KSWIN in favor of collections.deque, since appending or deleting elements in numpy arrays implies creating another object.
    • Added the seed parameter to drift.KSWIN to control reproducibility.
    • The Kolmogorov-Smirnov test mode was changed to the default (\"auto\") to suppress warnings (drift.KSWIN).
    • Unnecessary usage of numpy was also removed in other concept drift detectors.
    "},{"location":"releases/0.11.0/#ensemble","title":"ensemble","text":"
    • Streamline SRP{Classifier,Regressor}, remove unneeded numpy usage, make SRP variants robust against missing features, and fix bugs.
• Remove unneeded numpy usage in AdaptiveRandomForest{Classifier,Regressor}.
    "},{"location":"releases/0.11.0/#evaluate","title":"evaluate","text":"
• Added an iter_progressive_val_score function, which does the same as progressive_val_score, except that it yields rather than prints results at each step, which gives more control to the user.
    "},{"location":"releases/0.11.0/#imblearn","title":"imblearn","text":"
    • Added imblearn.ChebyshevUnderSampler and imblearn.ChebyshevOverSampler for imbalanced regression.
    "},{"location":"releases/0.11.0/#linear_model","title":"linear_model","text":"
    • linear_model.LinearRegression and linear_model.LogisticRegression now correctly apply the l2 regularization when their learn_many method is used.
• Added l1 regularization (implementation with cumulative penalty, see paper) for linear_model.LinearRegression and linear_model.LogisticRegression.
    "},{"location":"releases/0.11.0/#neighbors","title":"neighbors","text":"
    • neighbors.KNNADWINClassifier and neighbors.SAMKNNClassifier have been deprecated.
    • Introduced neighbors.NearestNeighbors for searching nearest neighbors.
    • Vastly refactored and simplified the nearest neighbors logic.
    "},{"location":"releases/0.11.0/#proba","title":"proba","text":"
    • Added proba.Rolling to measure a probability distribution over a window.
    "},{"location":"releases/0.11.0/#rules","title":"rules","text":"
    • AMRules's debug_one explicitly indicates the prediction strategy used by each rule.
    • Fix bug in debug_one (AMRules) where prediction explanations were incorrectly displayed when ordered_rule_set=True.
    "},{"location":"releases/0.11.0/#time_series","title":"time_series","text":"
    • Added an iter_evaluate function to trace the evaluation at each sample in a dataset.
    "},{"location":"releases/0.11.0/#tree","title":"tree","text":"
    • Fix bug in Naive Bayes-based leaf prediction.
    • Remove unneeded numpy usage in HoeffdingAdaptiveTree{Classifier,Regressor}.
    "},{"location":"releases/0.11.0/#stats","title":"stats","text":"
    • A revert method has been added to stats.Var.
    "},{"location":"releases/0.11.1/","title":"0.11.1 - 2022-06-06","text":"

    A small release to introduce benchmarks.

    "},{"location":"releases/0.11.1/#anomaly","title":"anomaly","text":"
    • Fixed a bug where anomaly filters were never updated.
    "},{"location":"releases/0.12.0/","title":"0.12.0 - 2022-09-02","text":"
• Moved all the public module imports from river/__init__.py to river/api.py and removed unnecessary dependencies between modules, enabling faster cherry-picked import times (~3x).
• Added wheels for Python 3.11.
    "},{"location":"releases/0.12.0/#base","title":"base","text":"
    • Introduced an mutate method to the base.Base class. This allows setting attributes in a controlled manner, which paves the way for online AutoML. See the recipe for more information.
    "},{"location":"releases/0.12.0/#compat","title":"compat","text":"
    • Moved the PyTorch wrappers to river-extra.
    "},{"location":"releases/0.12.0/#covariance","title":"covariance","text":"
    • Created a new covariance module to hold everything related to covariance and inversion covariance matrix estimation.
    • Moved misc.CovarianceMatrix to covariance.EmpiricalCovariance.
    • Added covariance.EmpiricalPrecision to estimate the inverse covariance matrix.
    "},{"location":"releases/0.12.0/#compose","title":"compose","text":"
    • Moved utils.pure_inference_mode to compose.pure_inference_mode and utils.warm_up_mode to compose.warm_up_mode.
    • Pipeline parts can now be accessed by integer positions as well as by name.
    "},{"location":"releases/0.12.0/#datasets","title":"datasets","text":"
• Imports synth, enabling `from river import datasets; datasets.synth`.
    "},{"location":"releases/0.12.0/#drift","title":"drift","text":"
• Refactored the concept drift detectors to match the rest of River's API. Warnings are only issued by detectors that support this feature.
• Drifts can be assessed via the drift_detected property. Warning signals can be accessed via the warning_detected property. The update method now returns self.
    • Ensure all detectors automatically reset their inner states after a concept drift detection.
    • Streamline DDM, EDDM, HDDM_A, and HDDM_W. Make the configurable parameters names match their respective papers.
    • Fix bugs in EDDM and HDDM_W.
    • Enable two-sided tests in PageHinkley.
    • Improve documentation and update tests.
    "},{"location":"releases/0.12.0/#feature_extraction","title":"feature_extraction","text":"
    • Added a tokenizer_pattern parameter to feature_extraction.BagOfWords and feature_extraction.TFIDF to override the default pattern used for tokenizing text.
    • Added a stop_words parameter to feature_extraction.BagOfWords and feature_extraction.TFIDF for removing stop words once the text has been tokenized.
    "},{"location":"releases/0.12.0/#linear_model","title":"linear_model","text":"
    • After long ado, we've finally implemented linear_model.BayesianLinearRegression.
    "},{"location":"releases/0.12.0/#metrics","title":"metrics","text":"
    • Removed dependency to optim.
    • Removed metrics.Rolling, due to the addition of utils.Rolling.
    • Removed metrics.TimeRolling, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#proba","title":"proba","text":"
    • Removed proba.Rolling, due to the addition of utils.Rolling.
    • Removed proba.TimeRolling, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#rule","title":"rule","text":"
    • The default splitter was changed to tree.splitter.TEBST for memory and running time efficiency.
    "},{"location":"releases/0.12.0/#stats","title":"stats","text":"
    • Removed stats.RollingMean, due to the addition of utils.Rolling.
    • Removed stats.RollingVar, due to the addition of utils.Rolling.
    • Removed stats.RollingCov, due to the addition of utils.Rolling.
    • Removed stats.RollingPearsonCorr, due to the addition of utils.Rolling.
    "},{"location":"releases/0.12.0/#stream","title":"stream","text":"
    • stream.iter_array now handles text data.
    • Added stream.TwitterLiveStream, to listen to a filtered live stream of Tweets.
    "},{"location":"releases/0.12.0/#time_series","title":"time_series","text":"
    • Added time_series.HorizonAggMetric.
    • Fixed a bug in time_series.SNARIMAX where the number of seasonal components was not correct when sp or sq were specified.
    • Fixed the differencing logic in time_series.SNARIMAX when d or sd were specified.
    "},{"location":"releases/0.12.0/#tree","title":"tree","text":"
    • Rename split_confidence and tie_threshold to delta and tau, respectively. This way, the parameters are not misleading and match what the research papers have used for decades.
    • Refactor HoeffdingAdaptiveTree{Classifier,Regressor} to allow the usage of any drift detector. Expose the significance level of the test used to switch between subtrees as a user-defined parameter.
    • Correct test used to switch between foreground and background subtrees in HoeffdingAdaptiveTreeRegressor. Due to the continuous and unbounded nature of the monitored errors, a z-test is now performed to decide which subtree to keep.
    • The default leaf_prediction value was changed to \"adaptive\", as this often results in the smallest errors in practice.
    • The default splitter was changed to tree.splitter.TEBST for memory and running time efficiency.
    "},{"location":"releases/0.12.0/#utils","title":"utils","text":"
    • Removed dependencies to anomaly and compose.
    • Added utils.Rolling and utils.TimeRolling, which are generic wrappers for computing over a window (of time).
    • Use binary search to speed-up element removal in utils.SortedWindow.
    "},{"location":"releases/0.12.1/","title":"0.12.1 - 2022-09-02","text":""},{"location":"releases/0.12.1/#base","title":"base","text":"
    • Fix the way the clone method handles positional arguments.
    "},{"location":"releases/0.13.0/","title":"0.13.0 - 2022-09-15","text":""},{"location":"releases/0.13.0/#compose","title":"compose","text":"
    • compose.TransformerUnion parts can now be accessed by index as well as by name.
    "},{"location":"releases/0.13.0/#stats","title":"stats","text":"
    • Added the LossyCount for tracking frequent itemsets. This implementation also supports a forgetting factor to reduce the influence of old elements.
    • The following statistics are now implemented in Rust (see the usage sketch after this list):
    • Quantile
    • EWMean
    • EWVar
    • IQR
    • Kurtosis
    • PeaktoPeak
    • Skew
    • RollingQuantile
    • RollingIQR
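    The Python-facing API of these statistics is unchanged; a minimal sketch using stats.RollingQuantile as an example:

        from river import stats

        # Rolling median over the last 100 values, now backed by Rust.
        q = stats.RollingQuantile(q=0.5, window_size=100)
        for x in range(1000):
            q.update(x)
        print(q.get())  # median of the last 100 values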
    "},{"location":"releases/0.13.0/#stream","title":"stream","text":"
    • Implemented stream.TwitchChatStream.
    "},{"location":"releases/0.14.0/","title":"0.14.0 - 2022-10-26","text":"
    • Introducing the bandit module for running multi-armed bandits.
    • Introducing the sketch module with summarization tools and data sketches working in a streaming fashion!
    "},{"location":"releases/0.14.0/#bandit","title":"bandit","text":"
    • Added bandit.EpsilonGreedy.
    • Added bandit.UCB.
    • Added bandit.ThompsonSampling.
    • Added a bandit.base module.
    • Added bandit.envs.CandyCaneContest, which implements the Gym interface.
    • Added bandit.envs.KArmedTestbed, which implements the Gym interface.
    • Added bandit.evaluate for basic benchmarking of bandit policies on a Gym environment.
    "},{"location":"releases/0.14.0/#drift","title":"drift","text":"
    • Exposed more parameters in ADWIN: clock, max_buckets, min_window_length, and grace_period.
    "},{"location":"releases/0.14.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.BanditRegressor, which is a generic model selection method that works with any bandit policy.
    • Removed model_selection.EpsilonGreedyRegressor due to the addition of model_selection.BanditRegressor.
    • Removed model_selection.UCBRegressor due to the addition of model_selection.BanditRegressor.
    "},{"location":"releases/0.14.0/#proba","title":"proba","text":"
    • Added proba.Beta.
    • Added a sample method to each distribution.
    • Added a mode property to each distribution.
    • Replaced the pmf and pdf methods with a __call__ method.
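    A minimal sketch of the reworked distribution API, using proba.Beta as an example (the alpha and beta parameter names are assumed):

        from river import proba

        dist = proba.Beta(alpha=5, beta=5)
        dist.sample()  # draw a random value from the distribution
        dist.mode      # most likely value, here 0.5
        dist(0.5)      # __call__ replaces pdf/pmf: the density evaluated at 0.5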
    "},{"location":"releases/0.14.0/#sketch","title":"sketch","text":"
    • Moved misc.Histogram to sketch.Histogram.
    • Moved stats.LossyCount to sketch.HeavyHitters and updated its API to better match collections.Counter.
    • Added missing return self in HeavyHitters.
    • Added the Count-Min Sketch (sketch.Counter) algorithm for approximate element counting.
    • Added an implementation of Bloom filter (sketch.Set) to provide approximate set-like operations.
    "},{"location":"releases/0.15.0/","title":"0.15.0 - 2023-01-29","text":""},{"location":"releases/0.15.0/#active","title":"active","text":"
    • Created this module dedicated to online active learning.
    • Added active.EntropySampler.
    "},{"location":"releases/0.15.0/#base","title":"base","text":"
    • Fixed an issue where an estimator that has a pipeline as an attribute could not be cloned.
    • Added a base.DriftAndWarningDetector to clarify the difference between drift detectors that have a warning_detected property and those that don't.
    • Added MultiLabelClassifier.
    • Added MultiTargetRegressor.
    • Added drift.BinaryDriftDetector.
    • Added drift.BinaryDriftAndWarningDetector.
    "},{"location":"releases/0.15.0/#conf","title":"conf","text":"
    • Introduced this new module to perform conformal predictions.
    • Added a conf.Interval dataclass to represent predictive intervals.
    • Added conf.RegressionJackknife.
    "},{"location":"releases/0.15.0/#datasets","title":"datasets","text":"
    • Removed unnecessary Numpy usage in the synth submodule.
    • Changed np.random.RandomState to np.random.default_rng where necessary.
    "},{"location":"releases/0.15.0/#drift","title":"drift","text":"
    • Added drift.DriftRetrainingClassifier.
    • Renamed drift.PeriodicTrigger to drift.DummyDriftDetector to clarify it is a naive baseline.
    • Created a binary submodule to organize all drift detectors which only apply to binary inputs.
    "},{"location":"releases/0.15.0/#ensemble","title":"ensemble","text":"
    • Added ensemble.ADWINBoostingClassifier.
    • Added ensemble.BOLEClassifier.
    "},{"location":"releases/0.15.0/#evaluate","title":"evaluate","text":"
    • evaluate.progressive_val_score and evaluate.iter_progressive_val_score will now also produce a report once the last sample has been processed, in addition to every print_every steps.
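    A minimal sketch of a run that triggers both kinds of report:

        from river import datasets, evaluate, linear_model, metrics, preprocessing

        model = preprocessing.StandardScaler() | linear_model.LogisticRegression()
        evaluate.progressive_val_score(
            dataset=datasets.Phishing(),
            model=model,
            metric=metrics.Accuracy(),
            print_every=500,  # a report every 500 samples, plus one after the last sample
        )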
    "},{"location":"releases/0.15.0/#feature_extraction","title":"feature_extraction","text":"
    • feature_extraction.BagOfWords now outputs a dictionary, and not a collections.Counter.
    "},{"location":"releases/0.15.0/#forest","title":"forest","text":"
    • Created this new module to host all models based on an ensemble of decision trees.
    • Moved ensemble.AdaptiveRandomForestClassifier to forest.ARFClassifier.
    • Moved ensemble.AdaptiveRandomForestRegressor to forest.ARFRegressor.
    • Added forest.AMFClassifier.
    • Added forest.OXTRegressor.
    "},{"location":"releases/0.15.0/#linear_model","title":"linear_model","text":"
    • Renamed use_dist to with_dist in linear_model.BayesianLinearRegression's predict_one method.
    "},{"location":"releases/0.15.0/#multiclass","title":"multiclass","text":"
    • Added a coding_method parameter to multiclass.OCC to control how the codes are randomly generated.
    "},{"location":"releases/0.15.0/#multioutput","title":"multioutput","text":"
    • Added MultiClassEncoder to convert multi-label tasks into multi-class problems.
    "},{"location":"releases/0.15.0/#preprocessing","title":"preprocessing","text":"
    • Renamed alpha to fading_factor in preprocessing.AdaptiveStandardScaler.
    "},{"location":"releases/0.15.0/#rules","title":"rules","text":"
    • Renamed alpha to fading_factor in rules.AMRules.
    "},{"location":"releases/0.15.0/#sketch","title":"sketch","text":"
    • Renamed alpha to fading_factor in sketch.HeavyHitters.
    "},{"location":"releases/0.15.0/#stats","title":"stats","text":"
    • Renamed alpha to fading_factor in stats.Entropy.
    • Renamed alpha to fading_factor in stats.EWMean.
    • Renamed alpha to fading_factor in stats.EWVar.
    "},{"location":"releases/0.15.0/#stream","title":"stream","text":"
    • Upgraded stream.iter_sql to SQLAlchemy 2.0.
    "},{"location":"releases/0.15.0/#tree","title":"tree","text":"
    • Removed LabelCombinationHoeffdingTreeClassifier. New code should use multioutput.MultiClassEncoder instead.
    "},{"location":"releases/0.15.0/#utils","title":"utils","text":"
    • Removed artifacts from the merger.
    "},{"location":"releases/0.16.0/","title":"0.16.0 - 2023-05-08","text":"

    Added wheels for Python 3.11.

    "},{"location":"releases/0.16.0/#feature_extraction","title":"feature_extraction","text":"
    • feature_extraction.Agg and feature_extraction.TargetAgg can now be passed an optional t in their learn_one method, which allows them to work with utils.TimeRolling (see the sketch below).
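    A minimal sketch pairing feature_extraction.Agg with utils.TimeRolling; the field names are made up for illustration:

        import datetime as dt
        from river import feature_extraction, stats, utils

        agg = feature_extraction.Agg(
            on='amount',
            by='customer',
            how=utils.TimeRolling(stats.Mean(), period=dt.timedelta(days=7)),
        )
        x = {'customer': 'Alice', 'amount': 42.0}
        agg.learn_one(x, t=dt.datetime(2023, 5, 8))  # the new optional timestamp
        agg.transform_one(x)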
    "},{"location":"releases/0.16.0/#metrics","title":"metrics","text":"
    • Added metrics.MAPE.
    • Added metrics.RollingROCAUC.
    "},{"location":"releases/0.16.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.GaussianRandomProjector.
    • Added preprocessing.SparseRandomProjector.
    "},{"location":"releases/0.16.0/#stats","title":"stats","text":"
    • Fixed randomness issue with the first few outputs of stats.Quantile.
    "},{"location":"releases/0.17.0/","title":"0.17.0 - 2023-05-27","text":""},{"location":"releases/0.17.0/#bandit","title":"bandit","text":"
    • Bandit policies now return a single arm when the pull method is called, instead of yielding one or more arms at a time. This is simpler to understand (see the sketch after this list). We will move back to multi-armed pulls in the future.
    • Added bandit.Exp3.
    • bandit.UCB and bandit.Exp3 have an extra reward_scaler parameter, which can be any object that inherits from compose.TargetTransformRegressor. This allows scaling rewards before updating arms.
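    A minimal sketch of the single-arm pull, assuming a pull(arm_ids) -> arm and update(arm, reward) interface:

        from river import bandit

        policy = bandit.EpsilonGreedy(epsilon=0.1, seed=42)
        arm = policy.pull(range(3))  # a single arm id, not a generator of arms
        reward = 1.0                 # hypothetical reward observed for that arm
        policy.update(arm, reward)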
    "},{"location":"releases/0.17.0/#compose","title":"compose","text":"
    • compose.TransformerProduct now correctly returns a compose.TransformerUnion when a transformer is added to it.
    • Fixed compose.TransformerProduct's transform_many behavior.
    • compose.TransformerUnion and compose.TransformerProduct will now clone the provided estimators, so that shallow copies aren't shared in different places.
    "},{"location":"releases/0.17.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.BanditClassifier, which is the classification equivalent of model_selection.BanditRegressor. Both are methods to perform online model selection via a bandit policy.
    "},{"location":"releases/0.17.0/#multioutput","title":"multioutput","text":"
    • metrics.multioutput.MacroAverage and metrics.multioutput.MicroAverage now loop over the keys of y_true instead of y_pred. This ensures a KeyError is correctly raised if y_pred is missing an output that is present in y_true.
    "},{"location":"releases/0.17.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.TargetMinMaxScaler, which operates the same as preprocessing.TargetStandardScaler, but instead uses min-max scaling.
    "},{"location":"releases/0.18.0/","title":"0.18.0 - 2023-06-26","text":""},{"location":"releases/0.18.0/#bandit","title":"bandit","text":"
    • Added bandit.BayesUCB.
    • Added bandit.evaluate_offline, for evaluating bandits on historical (logged) data.
    "},{"location":"releases/0.18.0/#cluster","title":"cluster","text":"
    • cluster.DBSTREAM will now only recluster on demand, rather than at every call to learn_one.
    "},{"location":"releases/0.18.0/#compat","title":"compat","text":"
    • The predict_many method of scikit-learn models wrapped with compat.convert_sklearn_to_river raised an exception if the model had not been fitted on any data yet. Instead, default predictions will be produced, which is consistent with the rest of River (see the sketch after this list).
    • compat.SKL2RiverRegressor and compat.SKL2RiverClassifier didn't check whether features were ordered in the same way at each method call. They now store the list of feature names at the first function call, and align subsequent inputs in the same order.
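    A sketch of the new default-prediction behavior, assuming a pandas DataFrame input:

        import pandas as pd
        from sklearn import linear_model as sk_linear_model
        from river import compat

        model = compat.convert_sklearn_to_river(sk_linear_model.SGDRegressor())
        X = pd.DataFrame({'x': [1.0, 2.0]})
        model.predict_many(X)  # default predictions instead of an exception before any fit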
    "},{"location":"releases/0.18.0/#compose","title":"compose","text":"
    • compose.TransformerProduct will now preserve the density of sparse columns.
    • Added a transform_many method to compose.FuncTransformer, allowing it to be used in mini-batch pipelines.
    • The compose.pure_inference_mode now works with mini-batching.
    "},{"location":"releases/0.18.0/#neighbors","title":"neighbors","text":"
    • Added neighbors.SWINN to power-up approximate nearest neighbor search. SWINN uses graphs to speed up nearest neighbor search in large sliding windows of data.
    • Renamed neighbors.NearestNeighbors to neighbors.LazySearch.
    • Standardized and created base classes for generic nearest neighbor search utilities.
    • The user can now select the nearest neighbor search engine to use in neighbors.KNNClassifier and neighbors.KNNRegressor.
    "},{"location":"releases/0.18.0/#preprocessing","title":"preprocessing","text":"
    • Renamed the sparse parameter to drop_zeros in preprocessing.OneHotEncoder.
    • The transform_many method of preprocessing.OneHotEncoder will now return a sparse dataframe, rather than a dense one, which will consume much less memory.
    "},{"location":"releases/0.18.0/#proba","title":"proba","text":"
    • Added a cdf method to proba.Beta.
    "},{"location":"releases/0.18.0/#tree","title":"tree","text":"
    • Exposed the min_branch_fraction parameter to avoid splits where most of the data goes to a single branch. Affects classification trees.
    • Added the max_share_to_split parameter to Hoeffding Tree classifiers. This parameter avoids splitting when the majority class has most of the data.
    "},{"location":"releases/0.18.0/#utils","title":"utils","text":"
    • Fixed utils.math.minkowski_distance.
    "},{"location":"releases/0.19.0/","title":"0.19.0 - 2023-08-02","text":"

    Calling learn_one in a pipeline will now update each part of the pipeline in turn. Before, the unsupervised parts of the pipeline were updated during predict_one. This is more intuitive for new users. The old behavior, which yields better results, can be restored by using the new compose.learn_during_predict context manager (see the sketch below).
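    A minimal sketch contrasting the two behaviors:

        from river import compose, linear_model, preprocessing

        model = preprocessing.StandardScaler() | linear_model.LinearRegression()
        x, y = {'x': 1.0}, 2.0

        model.predict_one(x)   # new default: nothing is learned at prediction time
        model.learn_one(x, y)  # every step of the pipeline is updated here

        with compose.learn_during_predict():
            model.predict_one(x)  # old behavior: unsupervised parts update here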

    "},{"location":"releases/0.19.0/#bandit","title":"bandit","text":"
    • Added a bandit.datasets submodule, which is meant to contain contextual bandit datasets.
    • Added bandit.base.ContextualPolicy.
    • Added bandit.datasets.NewsArticles.
    • Added bandit.LinUCBDisjoint, which is River's first contextual bandit policy.
    • Added bandit.RandomPolicy.
    "},{"location":"releases/0.19.0/#compose","title":"compose","text":"
    • Removed the compose.warm_up_mode context manager.
    • Removed the compose.pure_inference_mode context manager.
    • The last step of a pipeline will be correctly updated if it is unsupervised, which wasn't the case before.
    • Fixed an edge-case where compose.TransformerProduct would not work when chained more than twice.
    "},{"location":"releases/0.19.0/#drift","title":"drift","text":"
    • Added a datasets submodule, which contains datasets that are useful for concept drift experiments.
    • Fixed bugs in drift.binary.HDDM_A and drift.binary.HDDM_W.
    "},{"location":"releases/0.19.0/#linear_model","title":"linear_model","text":"
    • Added a predict_many method to linear_model.BayesianLinearRegression.
    • Added a smoothing parameter to linear_model.BayesianLinearRegression, which allows it to cope with concept drift.
    "},{"location":"releases/0.19.0/#forest","title":"forest","text":"
    • Fixed issue with forest.ARFClassifier which couldn't be passed a CrossEntropy metric.
    • Fixed a bug in forest.AMFClassifier which slightly improves predictive accuracy.
    • Added forest.AMFRegressor.
    "},{"location":"releases/0.19.0/#multioutput","title":"multioutput","text":"
    • Added metrics.multioutput.SampleAverage, which is equivalent to using average='samples' in scikit-learn.
    "},{"location":"releases/0.19.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.OrdinalEncoder, to map string features to integers.
    • The transform_many method of preprocessing.StandardScaler now uses the dtype of the input for the output.
    "},{"location":"releases/0.19.0/#proba","title":"proba","text":"
    • Added proba.MultivariateGaussian.
    "},{"location":"releases/0.19.0/#stream","title":"stream","text":"
    • stream.iter_arff now supports sparse data.
    • stream.iter_arff now supports multi-output targets.
    • stream.iter_arff now supports missing values indicated with question marks.
    "},{"location":"releases/0.19.0/#utils","title":"utils","text":"
    • Added utils.random.exponential to retrieve random samples following an exponential distribution.
    "},{"location":"releases/0.2.0/","title":"0.2.0 - 2019-05-27","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.2.0/#compose","title":"compose","text":"
    • compose.Pipeline now has a debug_one method.
    • compose.Discard and compose.Select now take variadic inputs, which means you don't have to provide a list of features to exclude/include.
    "},{"location":"releases/0.2.0/#datasets","title":"datasets","text":"
    • Added datasets.fetch_bikes
    "},{"location":"releases/0.2.0/#feature_extraction","title":"feature_extraction","text":"
    • Classes that inherit from feature_extraction.VectorizerMixin can now directly be passed str instances instead of dict instances.
    • feature_extraction.Agg and feature_extraction.TargetAgg can now aggregate on multiple attributes.
    "},{"location":"releases/0.2.0/#metrics","title":"metrics","text":"
    • Added RollingAccuracy
    • Added RollingCrossEntropy
    • Added RollingF1
    • Added RollingLogLoss
    • Added RollingMacroF1
    • Added RollingMacroPrecision
    • Added RollingMacroRecall
    • Added RollingMAE
    • Added RollingMicroF1
    • Added RollingMicroPrecision
    • Added RollingMicroRecall
    • Added RollingMSE
    • Added RollingPrecision
    • Added RollingRecall
    • Added RollingRMSE
    • Added RollingRMSLE
    • Added RollingSMAPE
    "},{"location":"releases/0.2.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.online_qa_score.
    "},{"location":"releases/0.2.0/#proba","title":"proba","text":"

    The dist module has been renamed to proba and is now public; for the moment it contains a single distribution, proba.Gaussian.

    "},{"location":"releases/0.2.0/#naive_bayes","title":"naive_bayes","text":"
    • Added naive_bayes.BernoulliNB.
    • Added naive_bayes.ComplementNB.
    "},{"location":"releases/0.2.0/#optim","title":"optim","text":"
    • Added optim.AdaBound.
    "},{"location":"releases/0.2.0/#tree","title":"tree","text":"
    • Added tree.DecisionTreeClassifier.
    • Removed tree.MondrianTreeClassifier and tree.MondrianTreeRegressor because their performance wasn't good enough.
    "},{"location":"releases/0.2.0/#stats","title":"stats","text":"
    • Added stats.AutoCorrelation.
    • Added stats.EWVar.
    • Renamed stats.Variance to stats.Var and stats.RollingVariance to stats.RollingVar.
    "},{"location":"releases/0.2.0/#stream","title":"stream","text":"
    • Added stream.simulate_qa.
    "},{"location":"releases/0.2.0/#utils","title":"utils","text":"
    • Added utils.SDFT.
    • Added utils.Skyline.
    • Renamed the window_size parameter to size in utils.Window and utils.SortedWindow.
    "},{"location":"releases/0.3.0/","title":"0.3.0 - 2019-06-23","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.3.0/#datasets","title":"datasets","text":"
    • Added datasets.load_chick_weights.
    "},{"location":"releases/0.3.0/#decomposition","title":"decomposition","text":"
    • Added decomposition.LDA.
    "},{"location":"releases/0.3.0/#ensemble","title":"ensemble","text":"
    • Added ensemble.HedgeRegressor.
    • Added ensemble.StackingBinaryClassifier.
    "},{"location":"releases/0.3.0/#metrics","title":"metrics","text":"
    • Added metrics.FBeta
    • Added metrics.MacroFBeta
    • Added metrics.MicroFBeta
    • Added metrics.MultiFBeta
    • Added metrics.RollingFBeta
    • Added metrics.RollingMacroFBeta
    • Added metrics.RollingMicroFBeta
    • Added metrics.RollingMultiFBeta
    • Added metrics.Jaccard
    • Added metrics.RollingConfusionMatrix
    • Added metrics.RegressionMultiOutput
    • Added metrics.MCC
    • Added metrics.RollingMCC
    • Added metrics.ROCAUC
    • Renamed metrics.F1Score to metrics.F1.
    "},{"location":"releases/0.3.0/#multioutput","title":"multioutput","text":"
    • Added multioutput.ClassifierChain.
    • Added multioutput.RegressorChain.
    "},{"location":"releases/0.3.0/#optim","title":"optim","text":"
    • Added optim.QuantileLoss
    • Added optim.MiniBatcher.
    "},{"location":"releases/0.3.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.Normalizer.
    "},{"location":"releases/0.3.0/#proba","title":"proba","text":"
    • Added proba.Multinomial.
    "},{"location":"releases/0.4.1/","title":"0.4.1 - 2019-10-23","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.4.1/#base","title":"base","text":"
    • Tests are now much more extensive, thanks mostly to the newly added estimator tags.
    "},{"location":"releases/0.4.1/#compose","title":"compose","text":"
    • Added compose.Renamer.
    "},{"location":"releases/0.4.1/#datasets","title":"datasets","text":"
    • Added fetch_kdd99_http.
    • Added fetch_sms.
    • Added fetch_trec07p.
    "},{"location":"releases/0.4.1/#ensemble","title":"ensemble","text":"
    • Removed ensemble.HedgeBinaryClassifier because its performance was subpar.
    • Removed ensemble.GroupRegressor, as this should be a special case of ensemble.StackingRegressor.
    "},{"location":"releases/0.4.1/#feature_extraction","title":"feature_extraction","text":"
    • Fixed a bug where feature_extraction.CountVectorizer and feature_extraction.TFIDFVectorizer couldn't be pickled.
    "},{"location":"releases/0.4.1/#linear_model","title":"linear_model","text":"
    • linear_model.LogisticRegression and linear_model.LinearRegression now have an intercept_lr parameter.
    "},{"location":"releases/0.4.1/#metrics","title":"metrics","text":"
    • Metrics can now be composed using the + operator, which is useful for evaluating multiple metrics at the same time (see the sketch after this list).
    • Added metrics.Rolling, which eliminates the need for a specific rolling implementation for each metric.
    • Each metric can now be passed a sample_weight argument.
    • Added metrics.WeightedF1.
    • Added metrics.WeightedFBeta.
    • Added metrics.WeightedPrecision.
    • Added metrics.WeightedRecall.
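    A minimal sketch of both additions, as the API existed at the time of this release (metrics.Rolling was later replaced by utils.Rolling, see 0.12.0 above):

        from river import metrics

        # Evaluate several metrics at once via the + operator.
        metric = metrics.Precision() + metrics.Recall()
        metric.update(y_true=True, y_pred=False)

        # One rolling wrapper for any metric.
        rolling_acc = metrics.Rolling(metrics.Accuracy(), window_size=100)
        rolling_acc.update(y_true=True, y_pred=True)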
    "},{"location":"releases/0.4.1/#neighbors","title":"neighbors","text":"
    • Added neighbors.KNeighborsRegressor.
    • Added neighbors.KNeighborsClassifier.
    "},{"location":"releases/0.4.1/#optim","title":"optim","text":"
    • Added optim.AdaMax.
    • The optim module has been reorganized into submodules; namely optim.schedulers, optim.initializers, and optim.losses. The top-level now only contains optimizers. Some classes have been renamed accordingly. See the documentation for details.
    • Renamed optim.VanillaSGD to optim.SGD.
    "},{"location":"releases/0.4.1/#stats","title":"stats","text":"
    • Added stats.IQR.
    • Added stats.RollingIQR.
    • Cythonized stats.Mean and stats.Var.
    "},{"location":"releases/0.4.1/#stream","title":"stream","text":"
    • Added stream.shuffle.
    • stream.iter_csv now has fraction and seed parameters to sample rows, deterministically or not.
    • Renamed stream.iter_numpy to stream.iter_array.
    • stream.iter_csv can now read from gzipped files.
    "},{"location":"releases/0.4.1/#time_series","title":"time_series","text":"
    • time_series.Detrender now has a window_size parameter for detrending with a rolling mean.
    "},{"location":"releases/0.4.1/#tree","title":"tree","text":"
    • Added tree.RandomForestClassifier.
    "},{"location":"releases/0.4.1/#utils","title":"utils","text":"
    • Fixed a bug where utils.dot could take longer than necessary.
    "},{"location":"releases/0.4.3/","title":"0.4.3 - 2019-10-27","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.4.3/#base","title":"base","text":"
    • Models that inherit from base.Wrapper (e.g. tree.RandomForestClassifier) can now be pickled.
    "},{"location":"releases/0.4.3/#datasets","title":"datasets","text":"
    • Added datasets.fetch_credit_card.
    "},{"location":"releases/0.4.3/#utils","title":"utils","text":"
    • Added the utils.math sub-module.
    "},{"location":"releases/0.4.3/#tree","title":"tree","text":"
    • Fixed the debug_one method of tree.DecisionTreeClassifier.
    "},{"location":"releases/0.4.4/","title":"0.4.4 - 2019-11-11","text":"
    • PyPI
    • GitHub

    This release was mainly made to provide access to wheels (https://pythonwheels.com/) for Windows and macOS.

    "},{"location":"releases/0.4.4/#ensemble","title":"ensemble","text":"
    • Added ensemble.AdaBoostClassifier.
    "},{"location":"releases/0.4.4/#linear_model","title":"linear_model","text":"
    • Added a clip_gradient parameter to linear_model.LinearRegression and linear_model.LogisticRegression. Gradient clipping was already implemented, but the maximum absolute value can now be set by the user.
    • The intercept_lr parameter of linear_model.LinearRegression and linear_model.LogisticRegression can now be passed an instance of optim.schedulers.Scheduler as well as a float.
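    A minimal sketch of both options; the choice of scheduler here is just an example:

        from river import linear_model, optim

        model = linear_model.LinearRegression(
            clip_gradient=1e3,  # user-settable maximum absolute gradient
            intercept_lr=optim.schedulers.InverseScaling(0.1),  # a scheduler instead of a float
        )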
    "},{"location":"releases/0.4.4/#metrics","title":"metrics","text":"
    • Fixed metrics.SMAPE, the implementation was missing a multiplication by 2.
    "},{"location":"releases/0.4.4/#optim","title":"optim","text":"
    • Added optim.schedulers.Optimal, which produces results identical to sklearn.linear_model.SGDRegressor and sklearn.linear_model.SGDClassifier when their learning_rate parameter is set to 'optimal'.
    "},{"location":"releases/0.4.4/#time_series","title":"time_series","text":"
    • Added time_series.SNARIMAX, a generic model which encompasses well-known time series models such as ARIMA and NARX.
    "},{"location":"releases/0.5.0/","title":"0.5.0 - 2020-03-13","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.5.0/#compat","title":"compat","text":"
    • Added compat.PyTorch2CremeRegressor.
    • compat.SKL2CremeRegressor and compat.SKL2CremeClassifier now have an optional batch_size parameter in order to perform mini-batching.
    "},{"location":"releases/0.5.0/#compose","title":"compose","text":"
    • Renamed compose.Whitelister to compose.Select.
    • Renamed compose.Blacklister to compose.Discard.
    "},{"location":"releases/0.5.0/#facto","title":"facto","text":"
    • Added facto.FFMClassifier.
    • Added facto.FFMRegressor.
    • Added facto.FwFMClassifier.
    • Added facto.FwFMRegressor.
    • Added facto.HOFMClassifier.
    • Added facto.HOFMRegressor.
    • Refactored facto.FMClassifier.
    • Refactored facto.FMRegressor.
    "},{"location":"releases/0.5.0/#feature_selection","title":"feature_selection","text":"
    • Added feature_selection.PoissonInclusion.
    • Removed feature_selection.RandomDiscarder as it didn't make much sense.
    "},{"location":"releases/0.5.0/#feature_extraction","title":"feature_extraction","text":"
    • Renamed feature_extraction.CountVectorizer to feature_extraction.BagOfWords.
    • Renamed feature_extraction.TFIDFVectorizer to feature_extraction.TFIDF.
    • Added preprocessor and ngram_range parameters to feature_extraction.BagOfWords.
    • Added preprocessor and ngram_range parameters to feature_extraction.TFIDF.
    "},{"location":"releases/0.5.0/#datasets","title":"datasets","text":"
    • The datasets module has been overhauled. Each dataset is now a class (e.g. fetch_electricity has become datasets.Elec2).
    • Added datasets.TrumpApproval.
    • Added datasets.MaliciousURL.
    • Added datasets.gen.SEA.
    • Added datasets.Higgs.
    • Added datasets.MovieLens100K.
    • Added datasets.Bananas.
    • Added datasets.Taxis.
    • Added datasets.ImageSegments.
    • Added datasets.SMTP
    "},{"location":"releases/0.5.0/#impute","title":"impute","text":"
    • Added impute.PreviousImputer.
    "},{"location":"releases/0.5.0/#linear_model","title":"linear_model","text":"
    • linear_model.FMClassifier has been moved to the facto module.
    • linear_model.FMRegressor has been moved to the facto module.
    • Added linear_model.ALMAClassifier.
    "},{"location":"releases/0.5.0/#metrics","title":"metrics","text":"
    • Added metrics.ClassificationReport.
    • Added metrics.TimeRolling.
    • The implementation of metrics.ROCAUC was incorrect. Using the trapezoidal rule instead of Simpson's rule seems to be more robust.
    • metrics.PerClass has been removed; it is recommended that you use metrics.ClassificationReport instead as it gives a better overview.
    "},{"location":"releases/0.5.0/#meta","title":"meta","text":"
    • Moved meta.TransformedTargetRegressor and meta.BoxCoxRegressor to this module (they were previously in the compose module).
    • Added meta.PredClipper
    "},{"location":"releases/0.5.0/#model_selection","title":"model_selection","text":"
    • Added model_selection.expand_param_grid to generate a list of models from a grid of parameters.
    • Added the model_selection.successive_halving method for selecting hyperparameters.
    • The online_score and online_qa_score methods have been merged into a single method named model_selection.progressive_val_score.
    "},{"location":"releases/0.5.0/#preprocessing","title":"preprocessing","text":"
    • Added preprocessing.RBFSampler.
    • Added preprocessing.MaxAbsScaler.
    • Added preprocessing.RobustScaler.
    • Added preprocessing.Binarizer.
    • Added with_mean and with_std parameters to preprocessing.StandardScaler.
    "},{"location":"releases/0.5.0/#optim","title":"optim","text":"
    • Added optim.losses.BinaryFocalLoss.
    • Added the optim.AMSGrad optimizer.
    • Added the optim.Nadam optimizer.
    • Added optim.losses.Poisson.
    • Fixed a performance bug in optim.NesterovMomentum.
    "},{"location":"releases/0.5.0/#reco","title":"reco","text":"
    • Added reco.FunkMF.
    • Renamed reco.SVD to reco.BiasedMF.
    • Renamed reco.SGDBaseline to reco.Baseline.
    • Models now expect a dict input with user and item fields.
    "},{"location":"releases/0.5.0/#sampling","title":"sampling","text":"
    • Added sampling.RandomUnderSampler.
    • Added sampling.RandomOverSampler.
    • Added sampling.RandomSampler.
    • Added sampling.HardSamplingClassifier.
    • Added sampling.HardSamplingRegressor.
    "},{"location":"releases/0.5.0/#stats","title":"stats","text":"
    • Added stats.AbsMax.
    • Added stats.RollingAbsMax.
    "},{"location":"releases/0.5.0/#stream","title":"stream","text":"
    • Added stream.iter_libsvm.
    • stream.iter_csv now supports reading from '.zip' files.
    • Added stream.Cache.
    • Added a drop parameter to stream.iter_csv to discard fields.
    "},{"location":"releases/0.5.1/","title":"0.5.1 - 2020-03-29","text":"
    • PyPI
    • GitHub
    "},{"location":"releases/0.5.1/#compose","title":"compose","text":"
    • compose.Pipeline and compose.TransformerUnion now take variadic arguments as input instead of a list. This doesn't change anything when using the shorthand operators | and + (see the sketch below).
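    A minimal sketch of the equivalent forms:

        from river import compose, linear_model, preprocessing

        # Variadic arguments instead of a list.
        pipe = compose.Pipeline(
            preprocessing.StandardScaler(),
            linear_model.LinearRegression(),
        )

        # The | shorthand is unaffected.
        pipe = preprocessing.StandardScaler() | linear_model.LinearRegression()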
    "},{"location":"releases/0.5.1/#model_selection","title":"model_selection","text":"
    • Removed model_selection.successive_halving
    • Added model_selection.SuccessiveHalvingRegressor and model_selection.SuccessiveHalvingClassifier
    "},{"location":"releases/0.5.1/#stream","title":"stream","text":"
    • Added a copy parameter to stream.simulate_qa in order to handle unwanted feature modifications.
    "},{"location":"releases/0.5.1/#tree","title":"tree","text":"
    • Added a curtail_under parameter to tree.DecisionTreeClassifier.
    • The speed and accuracy of both tree.DecisionTreeClassifier and tree.RandomForestClassifier have been slightly improved for numerical attributes.
    • The esthetics of the tree.DecisionTreeClassifier.draw method have been improved.
    "},{"location":"releases/0.6.0/","title":"0.6.0 - 2020-06-09","text":""},{"location":"releases/0.6.0/#base","title":"base","text":"
    • Added a new base class called SupervisedTransformer from which supervised transformers inherit. Before this, supervised transformers had an is_supervised property.
    "},{"location":"releases/0.6.0/#compose","title":"compose","text":"
    • Added compose.SelectType, which allows selecting feature subsets based on their type.
    • Added a score_one method to compose.Pipeline so that estimators from the anomaly module can be pipelined.
    • Added compose.Grouper, which allows applying transformers within different subgroups.
    "},{"location":"releases/0.6.0/#datasets","title":"datasets","text":"
    • Added datasets.Music, which is a dataset for multi-output binary classification.
    • Added datasets.synth.Friedman, which is a synthetic regression dataset.
    • The datasets.gen module has been renamed to datasets.synth.
    • Each dataset now has a __repr__ method which displays some descriptive information.
    • Added datasets.Insects, which has 10 variants.
    "},{"location":"releases/0.6.0/#feature_extraction","title":"feature_extraction","text":"
    • feature_extraction.Differ has been deprecated. We might put it back in the future if we find a better design.
    "},{"location":"releases/0.6.0/#impute","title":"impute","text":"
    • impute.StatImputer has been completely refactored.
    "},{"location":"releases/0.6.0/#metrics","title":"metrics","text":"
    • In metrics.SMAPE, instead of raising a ZeroDivisionError, the convention is now to use 0 when both y_true and y_pred are equal to 0.
    "},{"location":"releases/0.6.0/#model_selection","title":"model_selection","text":"
    • Added the possibility to configure how the progress is printed in model_selection.progressive_val_score. For instance, the progress can now be printed to a file by providing the file argument.
    "},{"location":"releases/0.6.0/#multiclass","title":"multiclass","text":"
    • Added multiclass.OutputCodeClassifier.
    • Added multiclass.OneVsOneClassifier.
    "},{"location":"releases/0.6.0/#multioutput","title":"multioutput","text":"
    • Fixed a bug where multioutput.ClassifierChain and multioutput.RegressorChain could not be pickled.
    "},{"location":"releases/0.6.0/#stats","title":"stats","text":"
    • Added stats.Shift, which can be used to compute statistics over a shifted version of a variable.
    • Added stats.Link, which can be used to compose univariate statistics. Univariate statistics can now be composed via the | operator (see the sketch after this list).
    • Renamed stats.Covariance to stats.Cov.
    • Renamed stats.PearsonCorrelation to stats.PearsonCorr.
    • Renamed stats.AutoCorrelation to stats.AutoCorr.
    • Added stats.RollingCov, which computes covariance between two variables over a window.
    • Added stats.RollingPearsonCorr, which computes the Pearson correlation over a window.
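    A minimal sketch of statistic composition via the | operator:

        from river import stats

        # Mean of the variable as it was one step ago.
        stat = stats.Shift(1) | stats.Mean()
        for x in [1, 2, 3, 4]:
            stat.update(x)
        stat.get()  # mean over the shifted values, i.e. excluding the latest observation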
    "},{"location":"releases/0.6.0/#stream","title":"stream","text":"
    • Added a stream.iter_sql utility method to work with SQLAlchemy.
    • The target_name parameter of stream.iter_csv has been renamed to target. It can now be passed a list of values in order to support multi-output scenarios.
    • Added stream.iter_arff for handling ARFF files.
    "},{"location":"releases/0.6.0/#tree","title":"tree","text":"
    • Cancelled the behavior where tree.DecisionTreeRegressor would raise an exception when no split was found.
    "},{"location":"releases/0.6.1/","title":"0.6.1 - 2020-06-10","text":""},{"location":"releases/0.6.1/#compose","title":"compose","text":"
    • Fixed a bug that occurred when part of a compose.Transformer was a compose.Pipeline, which wasn't properly handled.
    "},{"location":"releases/0.7.0/","title":"0.7.0 - 2021-04-16","text":"

    Alas, no release notes for this one.

    "},{"location":"releases/0.7.1/","title":"0.7.1 - 2021-06-13","text":"

    Fixed an issue where scikit-learn was imported in sam_knn.py but wasn't specified as a dependency.

    "},{"location":"releases/0.7.1/#expert","title":"expert","text":"
    • Each expert model will now raise a NotEnoughModels exception if only a single model is passed.
    "},{"location":"releases/0.7.1/#stream","title":"stream","text":"
    • Added drop_nones parameter to stream.iter_csv.
    "},{"location":"releases/0.8.0/","title":"0.8.0 - 2021-08-31","text":""},{"location":"releases/0.8.0/#base","title":"base","text":"
    • The predict_many and predict_proba_many methods have been removed from base.Classifier. They're part of base.MiniBatchClassifier.
    "},{"location":"releases/0.8.0/#ensemble","title":"ensemble","text":"
    • Implemented ensemble.VotingClassifier.
    • Implemented ensemble.SRPRegressor.
    "},{"location":"releases/0.8.0/#meta","title":"meta","text":"
    • Renamed meta.TransformedTargetRegressor to meta.TargetTransformRegressor.
    • Added meta.TargetStandardScaler.
    "},{"location":"releases/0.8.0/#preprocessing","title":"preprocessing","text":"
    • Added a with_std parameter to StandardScaler.
    "},{"location":"releases/0.8.0/#rules","title":"rules","text":"
    • Added rules.AMRules
    "},{"location":"releases/0.8.0/#stats","title":"stats","text":"
    • Make stats.RollingQuantile match the default behavior of Numpy's quantile function.
    "},{"location":"releases/0.8.0/#tree","title":"tree","text":"
    • Unified base class structure applied to all tree models.
    • Bug fixes.
    • Added tree.SGTClassifier and tree.SGTRegressor.
    "},{"location":"releases/0.9.0/","title":"0.9.0 - 2021-11-30","text":"
    • Wheels for Python 3.6 have been dropped.
    • Wheels for Python 3.9 have been added.
    "},{"location":"releases/0.9.0/#anomaly","title":"anomaly","text":"
    • Moved anomaly.base.AnomalyDetector to anomaly.AnomalyDetector.
    • Implemented anomaly.ConstantThresholder.
    • Implemented anomaly.QuantileThresholder.
    • Implemented anomaly.OneClassSVM.
    "},{"location":"releases/0.9.0/#base","title":"base","text":"
    • Renamed base.WrapperMixin to base.Wrapper.
    • Introduced base.WrapperEnsemble.
    • Clarified the difference between a base.typing.Dataset and a base.typing.Stream. A Stream is an instance of a Dataset and is stateful. A Dataset is stateless. It's essentially the same as the difference between an Iterable and an Iterator in the Python standard library (see the sketch below).
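    A minimal illustration of the distinction:

        from river import datasets

        dataset = datasets.Phishing()  # a Dataset: stateless, like an Iterable
        stream = iter(dataset)         # a Stream: stateful, like an Iterator
        x, y = next(stream)            # consuming the stream advances it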
    "},{"location":"releases/0.9.0/#compat","title":"compat","text":"
    • Added compat.PyTorch2RiverClassifier.
    • Refactored compat.PyTorch2RiverRegressor.
    "},{"location":"releases/0.9.0/#compose","title":"compose","text":"
    • You can now use a list as a shorthand to build a TransformerUnion.
    • Fixed a visualization issue when using a pipeline with multiple feature unions.
    • The prejudiced terms blacklist and whitelist have both been renamed to keys.
    • Removed learn_unsupervised parameter from pipeline methods.
    • Implemented compose.TransformerProduct.
    "},{"location":"releases/0.9.0/#datasets","title":"datasets","text":"
    • Added datasets.Keystroke.
    "},{"location":"releases/0.9.0/#ensemble","title":"ensemble","text":"
    • Bug fixes in ensemble.SRPClassifier and ensemble.SRPRegressor.
    • Some estimators have been moved into the ensemble module.
    "},{"location":"releases/0.9.0/#feature_extraction","title":"feature_extraction","text":"
    • Implemented feature_extraction.Lagger.
    • Implemented feature_extraction.TargetLagger.
    "},{"location":"releases/0.9.0/#meta","title":"meta","text":"

    This module has been deleted.

    • Moved meta.PredClipper to the preprocessing module.
    • Removed meta.BoxCoxRegressor.
    • Moved meta.TargetTransformRegressor to compose.TargetTransformRegressor.
    • Moved meta.TargetStandardScaler to preprocessing.TargetStandardScaler.
    "},{"location":"releases/0.9.0/#model_selection","title":"model_selection","text":"
    • This new module replaces the expert module.
    • Implemented model_selection.GreedyRegressor.
    • Added ModelSelector base class.
    "},{"location":"releases/0.9.0/#optim","title":"optim","text":"
    • optim.Adam and optim.RMSProp now work with utils.VectorDicts as well as numpy.ndarrays.
    • Added optim.losses.Huber.
    "},{"location":"releases/0.9.0/#preprocessing","title":"preprocessing","text":"
    • Enabled preprocessing.OneHotEncoder to one-hot encode values that are lists or sets.
    "},{"location":"releases/0.9.0/#reco","title":"reco","text":"
    • Added a debug_one method to reco.FMRegressor.
    "},{"location":"releases/0.9.0/#selection","title":"selection","text":"
    • This new module replaces the expert module.
    • Implemented selection.GreedyExpertRegressor.
    "},{"location":"releases/0.9.0/#stats","title":"stats","text":"
    • Fixed an issue where some statistics could not be printed if they had not seen any data yet.
    • Implemented median absolute deviation in stats.MAD.
    • The stats.Mean and stats.Var implementations have been made more numerically stable.
    "},{"location":"releases/0.9.0/#time_series","title":"time_series","text":"
    • time_series.Detrender and time_series.GroupDetrender have been removed as they overlap with preprocessing.TargetStandardScaler.
    • Implemented a time_series.evaluate method, which performs progressive validation for time series scenarios.
    • Implemented time_series.HorizonMetric class to evaluate the performance of a forecasting model at each time step along a horizon.
    • Implemented time_series.HoltWinters.
    "},{"location":"releases/0.9.0/#utils","title":"utils","text":"
    • Moved model_selection.expand_param_grid to utils.expand_param_grid.
    • Added utils.poisson.
    • Added the utils.log_method_calls context manager.
    • Added the utils.warm_up_mode context manager.
    • Added the utils.pure_inference_mode context manager.
    "},{"location":"releases/unreleased/","title":"Unreleased","text":"

    River's mini-batch methods now support pandas v2. In particular, River conforms to pandas' new sparse API.

    "},{"location":"releases/unreleased/#anomaly","title":"anomaly","text":"
    • Added anomaly.LocalOutlierFactor, which is an online version of the LOF algorithm for anomaly detection that matches the scikit-learn implementation.
    "},{"location":"releases/unreleased/#forest","title":"forest","text":"
    • Simplified the inner structures of forest.ARFClassifier and forest.ARFRegressor by removing a redundant class hierarchy. Simplified how concept drift logging can be accessed in individual trees and in the forest as a whole.
    "}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index 5d91f57521..0892cf0830 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,2682 +2,2682 @@ https://riverml.xyz/dev/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/overview/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/active/EntropySampler/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/active/base/ActiveLearningClassifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/GaussianScorer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/HalfSpaceTrees/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/LocalOutlierFactor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/OneClassSVM/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/QuantileFilter/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/ThresholdFilter/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/base/AnomalyDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/base/AnomalyFilter/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/anomaly/base/SupervisedAnomalyDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/BayesUCB/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/EpsilonGreedy/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/Exp3/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/LinUCBDisjoint/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/RandomPolicy/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/ThompsonSampling/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/UCB/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/evaluate-offline/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/evaluate/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/base/ContextualPolicy/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/base/Policy/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/datasets/BanditDataset/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/datasets/NewsArticles/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/envs/CandyCaneContest/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/bandit/envs/KArmedTestbed/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Base/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/BinaryDriftAndWarningDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/BinaryDriftDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Classifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Clusterer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/DriftAndWarningDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/DriftDetector/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Ensemble/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Estimator/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/MiniBatchClassifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/MiniBatchRegressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/MiniBatchSupervisedTransformer/ - 2023-09-12 + 2023-09-19 daily 
https://riverml.xyz/dev/api/base/MiniBatchTransformer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/MultiLabelClassifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/MultiTargetRegressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Regressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/SupervisedTransformer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Transformer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/Wrapper/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/base/WrapperEnsemble/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/CluStream/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/DBSTREAM/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/DenStream/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/KMeans/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/STREAMKMeans/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/cluster/TextClust/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/River2SKLClassifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/River2SKLClusterer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/River2SKLRegressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/River2SKLTransformer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/SKL2RiverClassifier/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/SKL2RiverRegressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/convert-river-to-sklearn/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compat/convert-sklearn-to-river/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Discard/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/FuncTransformer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Grouper/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Pipeline/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Prefixer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Renamer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Select/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/SelectType/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/Suffixer/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/TargetTransformRegressor/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/TransformerProduct/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/TransformerUnion/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/compose/learn-during-predict/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/conf/Interval/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/conf/RegressionJackknife/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/covariance/EmpiricalCovariance/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/covariance/EmpiricalPrecision/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/AirlinePassengers/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Bananas/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Bikes/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/ChickWeights/ - 
2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/CreditCard/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Elec2/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/HTTP/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Higgs/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/ImageSegments/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Insects/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Keystroke/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/MaliciousURL/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/MovieLens100K/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Music/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Phishing/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Restaurants/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/SMSSpam/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/SMTP/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/SolarFlare/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/TREC07/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/Taxis/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/TrumpApproval/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/WaterFlow/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/base/Dataset/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/base/FileDataset/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/base/RemoteDataset/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/base/SyntheticDataset/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Agrawal/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/AnomalySine/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/ConceptDriftStream/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Friedman/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/FriedmanDrift/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Hyperplane/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/LED/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/LEDDrift/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Logical/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Mixed/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Mv/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Planes2D/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/RandomRBF/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/RandomRBFDrift/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/RandomTree/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/SEA/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/STAGGER/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Sine/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/datasets/synth/Waveform/ - 2023-09-12 + 2023-09-19 daily https://riverml.xyz/dev/api/drift/ADWIN/ - 
2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/DriftRetrainingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/DummyDriftDetector/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/KSWIN/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/PageHinkley/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/binary/DDM/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/binary/EDDM/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/binary/HDDM-A/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/binary/HDDM-W/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/AirlinePassengers/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/Apple/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/Bitcoin/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/BrentSpotPrice/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/Occupancy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/RunLog/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/drift/datasets/UKCoalEmploy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/dummy/NoChangeClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/dummy/PriorClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/dummy/StatisticRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/ADWINBaggingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/ADWINBoostingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/AdaBoostClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/BOLEClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/BaggingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/BaggingRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/EWARegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/LeveragingBaggingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/SRPClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/SRPRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/StackingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/ensemble/VotingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/BinaryClassificationTrack/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/MultiClassClassificationTrack/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/RegressionTrack/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/Track/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/iter-progressive-val-score/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/evaluate/progressive-val-score/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FFMClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FFMRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FMClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FMRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FwFMClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/FwFMRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/HOFMClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/facto/HOFMRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/Agg/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/BagOfWords/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/PolynomialExtender/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/RBFSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/TFIDF/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-extraction/TargetAgg/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-selection/PoissonInclusion/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-selection/SelectKBest/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/feature-selection/VarianceThreshold/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/forest/AMFClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/forest/AMFRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/forest/ARFClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/forest/ARFRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/forest/OXTRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/ChebyshevOverSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/ChebyshevUnderSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/HardSamplingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/HardSamplingRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/RandomOverSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/RandomSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/imblearn/RandomUnderSampler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/ALMAClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/BayesianLinearRegression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/LinearRegression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/LogisticRegression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/PAClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/PARegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/Perceptron/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/SoftmaxRegression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/linear-model/base/GLM/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Accuracy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/AdjustedMutualInfo/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/AdjustedRand/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/BalancedAccuracy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/ClassificationReport/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/CohenKappa/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Completeness/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/ConfusionMatrix/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/CrossEntropy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/F1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/FBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/FowlkesMallows/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/GeometricMean/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Homogeneity/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Jaccard/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/LogLoss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MAE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MAPE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MCC/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MSE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MacroF1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MacroFBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MacroJaccard/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MacroPrecision/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MacroRecall/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MicroF1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MicroFBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MicroJaccard/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MicroPrecision/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MicroRecall/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MultiFBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/MutualInfo/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/NormalizedMutualInfo/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Precision/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/R2/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/RMSE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/RMSLE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/ROCAUC/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Rand/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Recall/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/RollingROCAUC/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/SMAPE/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/Silhouette/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/VBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/WeightedF1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/WeightedFBeta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/WeightedJaccard/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/WeightedPrecision/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/WeightedRecall/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/BinaryMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/ClassificationMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/Metric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/Metrics/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/MultiClassMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/RegressionMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/base/WrapperMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/ExactMatch/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/MacroAverage/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/MicroAverage/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/MultiLabelConfusionMatrix/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/PerOutput/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/SampleAverage/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/base/MultiOutputClassificationMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/metrics/multioutput/base/MultiOutputRegressionMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/misc/SDFT/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/misc/Skyline/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/BanditClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/BanditRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/GreedyRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/SuccessiveHalvingClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/SuccessiveHalvingRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/base/ModelSelectionClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/model-selection/base/ModelSelectionRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multiclass/OneVsOneClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multiclass/OneVsRestClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multiclass/OutputCodeClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multioutput/ClassifierChain/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multioutput/MonteCarloClassifierChain/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multioutput/MultiClassEncoder/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multioutput/ProbabilisticClassifierChain/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/multioutput/RegressorChain/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/naive-bayes/BernoulliNB/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/naive-bayes/ComplementNB/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/naive-bayes/GaussianNB/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/naive-bayes/MultinomialNB/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neighbors/KNNClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neighbors/KNNRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neighbors/LazySearch/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neighbors/SWINN/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neural-net/MLPRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neural-net/activations/Identity/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neural-net/activations/ReLU/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/neural-net/activations/Sigmoid/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/AMSGrad/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/AdaBound/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/AdaDelta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/AdaGrad/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/AdaMax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/Adam/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/Averager/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/FTRLProximal/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/Momentum/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/Nadam/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/NesterovMomentum/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/RMSProp/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/SGD/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/base/Initializer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/base/Loss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/base/Optimizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/base/Scheduler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/initializers/Constant/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/initializers/Normal/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/initializers/Zeros/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Absolute/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/BinaryFocalLoss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/BinaryLoss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Cauchy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/CrossEntropy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/EpsilonInsensitiveHinge/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Hinge/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Huber/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Log/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/MultiClassLoss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Poisson/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Quantile/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/RegressionLoss/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/losses/Squared/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/schedulers/Constant/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/schedulers/InverseScaling/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/optim/schedulers/Optimal/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/AdaptiveStandardScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/Binarizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/FeatureHasher/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/GaussianRandomProjector/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/LDA/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/MaxAbsScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/MinMaxScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/Normalizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/OneHotEncoder/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/OrdinalEncoder/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/PredClipper/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/PreviousImputer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/RobustScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/SparseRandomProjector/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/StandardScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/StatImputer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/TargetMinMaxScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/preprocessing/TargetStandardScaler/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/Beta/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/Gaussian/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/Multinomial/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/MultivariateGaussian/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/base/BinaryDistribution/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/base/ContinuousDistribution/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/base/DiscreteDistribution/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/proba/base/Distribution/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/reco/Baseline/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/reco/BiasedMF/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/reco/FunkMF/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/reco/RandomNormal/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/reco/base/Ranker/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/rules/AMRules/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/sketch/Counter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/sketch/HeavyHitters/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/sketch/Histogram/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/sketch/Set/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/AbsMax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/AutoCorr/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/BayesianMean/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Count/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Cov/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/EWMean/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/EWVar/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Entropy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/IQR/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Kurtosis/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Link/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/MAD/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Max/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Mean/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Min/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Mode/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/NUnique/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/PeakToPeak/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/PearsonCorr/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Quantile/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingAbsMax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingIQR/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingMax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingMin/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingMode/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingPeakToPeak/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/RollingQuantile/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/SEM/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Shift/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Skew/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Sum/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/Var/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/base/Bivariate/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stats/base/Univariate/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/Cache/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/TwitchChatStream/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/TwitterLiveStream/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-arff/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-array/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-csv/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-libsvm/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-pandas/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-sklearn-dataset/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/iter-sql/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/shuffle/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/stream/simulate-qa/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/ForecastingMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/HoltWinters/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/HorizonAggMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/HorizonMetric/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/SNARIMAX/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/evaluate/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/iter-evaluate/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/time-series/base/Forecaster/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/ExtremelyFastDecisionTreeClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/HoeffdingAdaptiveTreeClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/HoeffdingAdaptiveTreeRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/HoeffdingTreeClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/HoeffdingTreeRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/SGTClassifier/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/SGTRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/iSOUPTreeRegressor/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/base/Branch/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/base/Leaf/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/DynamicQuantizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/EBSTSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/ExhaustiveSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/GaussianSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/HistogramSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/QOSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/Quantizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/Splitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/StaticQuantizer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/tree/splitter/TEBSTSplitter/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/Rolling/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/SortedWindow/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/TimeRolling/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/VectorDict/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/dict2numpy/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/expand-param-grid/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/log-method-calls/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/numpy2dict/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/argmax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/chain-dot/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/clamp/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/dot/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/dotvecmat/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/log-sum-2-exp/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/matmul2d/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/minkowski-distance/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/norm/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/outer/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/prod/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/sherman-morrison/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/sigmoid/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/sign/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/softmax/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/math/woodbury-matrix/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/norm/normalize-values-in-dict/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/norm/scale-values-in-dict/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/pretty/humanize-bytes/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/pretty/print-table/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/random/exponential/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/api/utils/random/poisson/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/benchmarks/Binary%20classification/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/benchmarks/Multiclass%20classification/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/benchmarks/Regression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/batch-to-online/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/bike-sharing-forecasting/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/building-a-simple-nowcasting-model/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/content-personalization/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/debugging-a-pipeline/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/imbalanced-learning/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/quantile-regression-uncertainty/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/sentence-classification/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/the-art-of-using-pipelines/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/matrix-factorization-for-recommender-systems/part-1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/matrix-factorization-for-recommender-systems/part-2/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/examples/matrix-factorization-for-recommender-systems/part-3/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/faq/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/basic-concepts/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/installation/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/next-steps/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/related-projects/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/why-use-river/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/getting-started/binary-classification/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/getting-started/concept-drift-detection/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/getting-started/multiclass-classification/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/introduction/getting-started/regression/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/active-learning/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/bandits-101/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/cloning-and-mutating/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/feature-extraction/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/hyperparameter-tuning/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/mini-batching/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/model-evaluation/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/on-hoeffding-trees/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/pipelines/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/reading-data/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/recipes/rolling-computations/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.0.2/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.0.3/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.1.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.10.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.10.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.11.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.11.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.12.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.12.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.13.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.14.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.15.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.16.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.17.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.18.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.19.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.2.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.3.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.4.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.4.3/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.4.4/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.5.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.5.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.6.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.6.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.7.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.7.1/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.8.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/0.9.0/ - 2023-09-12 + 2023-09-19 daily
https://riverml.xyz/dev/releases/unreleased/ - 2023-09-12 + 2023-09-19 daily
\ No newline at end of file
diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz
index 52d78836512dc434e4117b4ce8a468af3c33057b..7ecc74cbcc7575311884b27916034d8747065a12 100644
GIT binary patch
delta 4166
[base85-encoded binary delta payload omitted]
delta 4166
[base85-encoded binary delta payload omitted]