From bbdb2dbc939d7a1840a4c2685924b824d0575650 Mon Sep 17 00:00:00 2001
From: Matthew Taylor
Date: Fri, 14 Apr 2017 10:54:40 -0700
Subject: [PATCH 1/2] Nuked 10field_many_balanced

---
 .../10field_many_balanced/UNDER_DEVELOPMENT       | 423 -----------------
 .../spatial/10field_many_balanced/data.csv        |   3 -
 .../10field_many_balanced/description.py          | 426 ------------------
 3 files changed, 852 deletions(-)
 delete mode 100644 examples/opf/experiments/anomaly/spatial/10field_many_balanced/UNDER_DEVELOPMENT
 delete mode 100644 examples/opf/experiments/anomaly/spatial/10field_many_balanced/data.csv
 delete mode 100644 examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py

diff --git a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/UNDER_DEVELOPMENT b/examples/opf/experiments/anomaly/spatial/10field_many_balanced/UNDER_DEVELOPMENT
deleted file mode 100644
index 95a2f9c2f1..0000000000
--- a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/UNDER_DEVELOPMENT
+++ /dev/null
@@ -1,423 +0,0 @@
-# ----------------------------------------------------------------------
-# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
-# with Numenta, Inc., for a separate license for this software code, the
-# following terms and conditions apply:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero Public License version 3 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero Public License for more details.
-#
-# You should have received a copy of the GNU Affero Public License
-# along with this program. If not, see http://www.gnu.org/licenses.
-#
-# http://numenta.org/licenses/
-# ----------------------------------------------------------------------
-
-"""
-Template file used by the OPF Experiment Generator to generate the actual
-description.py file by replacing $XXXXXXXX tokens with desired values.
-
-This description.py file was generated by:
-'~/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
-"""
-
-from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
-
-from nupic.frameworks.opf.expdescriptionhelpers import (
-  updateConfigFromSubConfig,
-  applyValueGettersToContainer,
-  DeferredDictLookup)
-
-from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
-from nupic.frameworks.opf.metrics import MetricSpec
-from nupic.frameworks.opf.opfutils import (InferenceType,
-                                           InferenceElement)
-from nupic.support import aggregationDivide
-
-from nupic.frameworks.opf.opftaskdriver import (
-  IterationPhaseSpecLearnOnly,
-  IterationPhaseSpecInferOnly,
-  IterationPhaseSpecLearnAndInfer)
-
-
-# Model Configuration Dictionary:
-#
-# Define the model parameters and adjust for any modifications if imported
-# from a sub-experiment.
-#
-# These fields might be modified by a sub-experiment; this dict is passed
-# between the sub-experiment and base experiment
-#
-#
-# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
-# within the config dictionary may be assigned futures derived from the
-# ValueGetterBase class, such as DeferredDictLookup.
-# This facility is particularly handy for enabling substitution of values in
-# the config dictionary from other values in the config dictionary, which is
-# needed by permutation.py-based experiments. These values will be resolved
-# during the call to applyValueGettersToContainer(),
-# which we call after the base experiment's config dictionary is updated from
-# the sub-experiment. See ValueGetterBase and
-# DeferredDictLookup for more details about value-getters.
-#
-# For each custom encoder parameter to be exposed to the sub-experiment/
-# permutation overrides, define a variable in this section, using key names
-# beginning with a single underscore character to avoid collisions with
-# pre-defined keys (e.g., _dsEncoderFieldName2_N).
-#
-# Example:
-#   config = dict(
-#     _dsEncoderFieldName2_N = 70,
-#     _dsEncoderFieldName2_W = 5,
-#     dsEncoderSchema = [
-#       base=dict(
-#         fieldname='Name2', type='ScalarEncoder',
-#         name='Name2', minval=0, maxval=270, clipInput=True,
-#         n=DeferredDictLookup('_dsEncoderFieldName2_N'),
-#         w=DeferredDictLookup('_dsEncoderFieldName2_W')),
-#     ],
-#   )
-#   updateConfigFromSubConfig(config)
-#   applyValueGettersToContainer(config)
-config = {
-  # Type of model that the rest of these parameters apply to.
-  'model': "HTMPrediction",
-
-  # Version that specifies the format of the config.
-  'version': 1,
-
-  # Intermediate variables used to compute fields in modelParams and also
-  # referenced from the control section.
-  'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
-                                   ('numericFieldNameB', 'sum'),
-                                   ('categoryFieldNameC', 'first')],
-                       'hours': 0},
-
-  'predictAheadTime': None,
-
-  # Model parameter dictionary.
-  'modelParams': {
-    # The type of inference that this model will perform
-    'inferenceType': 'NontemporalAnomaly',
-
-    'sensorParams': {
-      # Sensor diagnostic output verbosity control;
-      # if > 0: sensor region will print out on screen what it's sensing
-      # at each step 0: silent; >=1: some info; >=2: more info;
-      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
-      'verbosity' : 0,
-
-      # Example:
-      #   dsEncoderSchema = [
-      #     DeferredDictLookup('__field_name_encoder'),
-      #   ],
-      #
-      # (value generated from DS_ENCODER_SCHEMA)
-      'encoders': {
-        'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
-        'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
-        'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
-        'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
-        'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
-        'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
-        'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
-        'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
-        'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
-        'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
-      },
-
-      # A dictionary specifying the period for automatically-generated
-      # resets from a RecordSensor;
-      #
-      # None = disable automatically-generated resets (also disabled if
-      # all of the specified values evaluate to 0).
-      # Valid keys is the desired combination of the following:
-      #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
-      #
-      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
-      #
-      # (value generated from SENSOR_AUTO_RESET)
-      'sensorAutoReset' : None,
-    },
-
-    'spEnable': True,
-
-    'spParams': {
-      # SP diagnostic output verbosity control;
-      # 0: silent; >=1: some info; >=2: more info;
-      'spVerbosity' : 0,
-
-      'globalInhibition': 1,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      'inputWidth': 0,
-
-      # SP inhibition control (absolute value);
-      # Maximum number of active columns in the SP region's output (when
-      # there are more, the weaker ones are suppressed)
-      'numActiveColumnsPerInhArea': 40,
-
-      'seed': 1956,
-
-      # potentialPct
-      # What percent of the columns's receptive field is available
-      # for potential synapses. At initialization time, we will
-      # choose potentialPct * (2*potentialRadius+1)^2
-      'potentialPct': 0.5,
-
-      # The default connected threshold. Any synapse whose
-      # permanence value is above the connected threshold is
-      # a "connected synapse", meaning it can contribute to the
-      # cell's firing. Typical value is 0.10. Cells whose activity
-      # level before inhibition falls below minDutyCycleBeforeInh
-      # will have their own internal synPermConnectedCell
-      # threshold set below this default value.
-      # (This concept applies to both SP and TP and so 'cells'
-      # is correct here as opposed to 'columns')
-      'synPermConnected': 0.1,
-
-      'synPermActiveInc': 0.1,
-
-      'synPermInactiveDec': 0.01,
-    },
-
-    # Controls whether TP is enabled or disabled;
-    # TP is necessary for making temporal predictions, such as predicting
-    # the next inputs. Without TP, the model is only capable of
-    # reconstructing missing sensor inputs (via SP).
-    'tmEnable' : True,
-
-    'tmParams': {
-      # TP diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
-      'verbosity': 0,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      # The number of cells (i.e., states), allocated per column.
-      'cellsPerColumn': 32,
-
-      'inputWidth': 2048,
-
-      'seed': 1960,
-
-      # Temporal Pooler implementation selector (see _getTPClass in
-      # CLARegion.py).
-      'temporalImp': 'cpp',
-
-      # New Synapse formation count
-      # NOTE: If None, use spNumActivePerInhArea
-      #
-      # TODO: need better explanation
-      'newSynapseCount': 20,
-
-      # Maximum number of synapses per segment
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSynapsesPerSegment': 32,
-
-      # Maximum number of segments per cell
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSegmentsPerCell': 128,
-
-      # Initial Permanence
-      # TODO: need better explanation
-      'initialPerm': 0.21,
-
-      # Permanence Increment
-      'permanenceInc': 0.1,
-
-      # Permanence Decrement
-      # If set to None, will automatically default to tpPermanenceInc
-      # value.
-      'permanenceDec' : 0.1,
-
-      'globalDecay': 0.0,
-
-      'maxAge': 0,
-
-      # Minimum number of active synapses for a segment to be considered
-      # during search for the best-matching segments.
-      # None=use default
-      # Replaces: tpMinThreshold
-      'minThreshold': 12,
-
-      # Segment activation threshold.
-      # A segment is active if it has >= tpSegmentActivationThreshold
-      # connected synapses that are active due to infActiveState
-      # None=use default
-      # Replaces: tpActivationThreshold
-      'activationThreshold': 16,
-
-      'outputType': 'normal',
-
-      # "Pay Attention Mode" length. This tells the TP how many new
-      # elements to append to the end of a learned sequence at a time.
-      # Smaller values are better for datasets with short sequences,
-      # higher values are better for datasets with long sequences.
-      'pamLength': 1,
-    },
-
-    'clParams': {
-      'regionName' : 'SDRClassifierRegion',
-
-      # Classifier diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      'verbosity' : 0,
-
-      # This controls how fast the classifier learns/forgets. Higher values
-      # make it adapt faster and forget older patterns faster.
-      'alpha': 0.001,
-
-      # This is set after the call to updateConfigFromSubConfig and is
-      # computed from the aggregationInfo and predictAheadTime.
-      'steps': '1',
-
-
-    },
-
-    'trainSPNetOnlyIfRequested': False,
-  },
-
-
-}
-# end of config dictionary
-
-
-# Adjust base config dictionary for any modifications if imported from a
-# sub-experiment
-updateConfigFromSubConfig(config)
-
-
-# Compute predictionSteps based on the predictAheadTime and the aggregation
-# period, which may be permuted over.
-if config['predictAheadTime'] is not None:
-  predictionSteps = int(round(aggregationDivide(
-    config['predictAheadTime'], config['aggregationInfo'])))
-  assert (predictionSteps >= 1)
-  config['modelParams']['clParams']['steps'] = str(predictionSteps)
-
-
-# Adjust config by applying ValueGetterBase-derived
-# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
-# to support value-getter-based substitutions from the sub-experiment (if any)
-applyValueGettersToContainer(config)
-
-
-
-# [optional] A sequence of one or more tasks that describe what to do with the
-# model. Each task consists of a task label, an input spec., iteration count,
-# and a task-control spec per opfTaskSchema.json
-#
-# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
-# Clients that interact with OPFExperiment directly do not make use of
-# the tasks specification.
-#
-control = dict(
-  environment='opfExperiment',
-
-tasks = [
-  {
-    # Task label; this label string may be used for diagnostic logging and for
-    # constructing filenames or directory pathnames for task-specific files, etc.
-    'taskLabel' : "Anomaly",
-
-    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
-    #
-    'dataset' : {
-      'info': 'test_NoProviders',
-      'version': 1,
-
-      'streams': [
-        {
-          'columns': ['*'],
-          'info': 'my simple dataset',
-          'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
-        }
-      ],
-
-      # TODO: Aggregation is not supported yet by run_opf_experiment.py
-      #'aggregation' : config['aggregationInfo']
-    },
-
-    # Iteration count: maximum number of iterations. Each iteration corresponds
-    # to one record from the (possibly aggregated) dataset. The task is
-    # terminated when either number of iterations reaches iterationCount or
-    # all records in the (possibly aggregated) database have been processed,
-    # whichever occurs first.
-    #
-    # iterationCount of -1 = iterate over the entire dataset
-    'iterationCount' : -1,
-
-
-    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
-    'taskControl' : {
-
-      # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
-      # instances.
-      'iterationCycle' : [
-        #IterationPhaseSpecLearnOnly(1000),
-        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
-        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
-      ],
-
-      'metrics' : [
-      ],
-
-      # Logged Metrics: A sequence of regular expressions that specify which of
-      # the metrics from the Inference Specifications section MUST be logged for
-      # every prediction. The regex's correspond to the automatically generated
-      # metric labels. This is similar to the way the optimization metric is
-      # specified in permutations.py.
-      'loggedMetrics': ['.*nupicScore.*'],
-
-
-      # Callbacks for experimentation/research (optional)
-      'callbacks' : {
-        # Callbacks to be called at the beginning of a task, before model iterations.
-        # Signature: callback(); returns nothing
-#        'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
-#        'setup' : [htmPredictionModelControlDisableTPLearningCb],
-        'setup' : [],
-
-        # Callbacks to be called after every learning/inference iteration
-        # Signature: callback(); returns nothing
-        'postIter' : [],
-
-        # Callbacks to be called when the experiment task is finished
-        # Signature: callback(); returns nothing
-        'finish' : []
-      }
-    } # End of taskControl
-  }, # End of task
-]
-
-)
-
-
-
-descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
-                                                control=control)
diff --git a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/data.csv b/examples/opf/experiments/anomaly/spatial/10field_many_balanced/data.csv
deleted file mode 100644
index bb3074cba2..0000000000
--- a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/data.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-f0, f1
-string, string
-,
\ No newline at end of file
diff --git a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py b/examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py
deleted file mode 100644
index 9ce50fc1be..0000000000
--- a/examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py
+++ /dev/null
@@ -1,426 +0,0 @@
-# ----------------------------------------------------------------------
-# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
-# with Numenta, Inc., for a separate license for this software code, the
-# following terms and conditions apply:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero Public License version 3 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero Public License for more details.
-#
-# You should have received a copy of the GNU Affero Public License
-# along with this program. If not, see http://www.gnu.org/licenses.
-#
-# http://numenta.org/licenses/
-# ----------------------------------------------------------------------
-
-"""
-Template file used by the OPF Experiment Generator to generate the actual
-description.py file by replacing $XXXXXXXX tokens with desired values.
-
-This description.py file was generated by:
-'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
-"""
-
-from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
-
-from nupic.frameworks.opf.expdescriptionhelpers import (
-  updateConfigFromSubConfig,
-  applyValueGettersToContainer,
-  DeferredDictLookup)
-
-from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
-from nupic.frameworks.opf.metrics import MetricSpec
-from nupic.frameworks.opf.opfutils import (InferenceType,
-                                           InferenceElement)
-from nupic.support import aggregationDivide
-
-from nupic.frameworks.opf.opftaskdriver import (
-  IterationPhaseSpecLearnOnly,
-  IterationPhaseSpecInferOnly,
-  IterationPhaseSpecLearnAndInfer)
-
-
-# Model Configuration Dictionary:
-#
-# Define the model parameters and adjust for any modifications if imported
-# from a sub-experiment.
-#
-# These fields might be modified by a sub-experiment; this dict is passed
-# between the sub-experiment and base experiment
-#
-#
-# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
-# within the config dictionary may be assigned futures derived from the
-# ValueGetterBase class, such as DeferredDictLookup.
-# This facility is particularly handy for enabling substitution of values in
-# the config dictionary from other values in the config dictionary, which is
-# needed by permutation.py-based experiments. These values will be resolved
-# during the call to applyValueGettersToContainer(),
-# which we call after the base experiment's config dictionary is updated from
-# the sub-experiment. See ValueGetterBase and
-# DeferredDictLookup for more details about value-getters.
-#
-# For each custom encoder parameter to be exposed to the sub-experiment/
-# permutation overrides, define a variable in this section, using key names
-# beginning with a single underscore character to avoid collisions with
-# pre-defined keys (e.g., _dsEncoderFieldName2_N).
-#
-# Example:
-#   config = dict(
-#     _dsEncoderFieldName2_N = 70,
-#     _dsEncoderFieldName2_W = 5,
-#     dsEncoderSchema = [
-#       base=dict(
-#         fieldname='Name2', type='ScalarEncoder',
-#         name='Name2', minval=0, maxval=270, clipInput=True,
-#         n=DeferredDictLookup('_dsEncoderFieldName2_N'),
-#         w=DeferredDictLookup('_dsEncoderFieldName2_W')),
-#     ],
-#   )
-#   updateConfigFromSubConfig(config)
-#   applyValueGettersToContainer(config)
-config = {
-  # Type of model that the rest of these parameters apply to.
-  'model': "HTMPrediction",
-
-  # Version that specifies the format of the config.
-  'version': 1,
-
-  # Intermediate variables used to compute fields in modelParams and also
-  # referenced from the control section.
-  'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
-                                   ('numericFieldNameB', 'sum'),
-                                   ('categoryFieldNameC', 'first')],
-                       'hours': 0},
-
-  'predictAheadTime': None,
-
-  # Model parameter dictionary.
-  'modelParams': {
-    # The type of inference that this model will perform
-    'inferenceType': 'NontemporalAnomaly',
-
-    'sensorParams': {
-      # Sensor diagnostic output verbosity control;
-      # if > 0: sensor region will print out on screen what it's sensing
-      # at each step 0: silent; >=1: some info; >=2: more info;
-      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
-      'verbosity' : 0,
-
-      # Example:
-      #   dsEncoderSchema = [
-      #     DeferredDictLookup('__field_name_encoder'),
-      #   ],
-      #
-      # (value generated from DS_ENCODER_SCHEMA)
-      'encoders': {
-        'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
-        'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
-        'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
-        'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
-        'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
-        'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
-        'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
-        'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
-        'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
-        'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
-      },
-
-      # A dictionary specifying the period for automatically-generated
-      # resets from a RecordSensor;
-      #
-      # None = disable automatically-generated resets (also disabled if
-      # all of the specified values evaluate to 0).
-      # Valid keys is the desired combination of the following:
-      #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
-      #
-      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
-      #
-      # (value generated from SENSOR_AUTO_RESET)
-      'sensorAutoReset' : None,
-    },
-
-    'spEnable': True,
-
-    'spParams': {
-      # SP diagnostic output verbosity control;
-      # 0: silent; >=1: some info; >=2: more info;
-      'spVerbosity' : 0,
-
-      'globalInhibition': 1,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      'inputWidth': 0,
-
-      # SP inhibition control (absolute value);
-      # Maximum number of active columns in the SP region's output (when
-      # there are more, the weaker ones are suppressed)
-      'numActiveColumnsPerInhArea': 40,
-
-      'seed': 1956,
-
-      # potentialPct
-      # What percent of the columns's receptive field is available
-      # for potential synapses. At initialization time, we will
-      # choose potentialPct * (2*potentialRadius+1)^2
-      'potentialPct': 0.5,
-
-      # The default connected threshold. Any synapse whose
-      # permanence value is above the connected threshold is
-      # a "connected synapse", meaning it can contribute to the
-      # cell's firing. Typical value is 0.10. Cells whose activity
-      # level before inhibition falls below minDutyCycleBeforeInh
-      # will have their own internal synPermConnectedCell
-      # threshold set below this default value.
-      # (This concept applies to both SP and TP and so 'cells'
-      # is correct here as opposed to 'columns')
-      'synPermConnected': 0.1,
-
-      'synPermActiveInc': 0.1,
-
-      'synPermInactiveDec': 0.01,
-    },
-
-    # Controls whether TP is enabled or disabled;
-    # TP is necessary for making temporal predictions, such as predicting
-    # the next inputs. Without TP, the model is only capable of
-    # reconstructing missing sensor inputs (via SP).
-    'tmEnable' : True,
-
-    'tmParams': {
-      # TP diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
-      'verbosity': 0,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      # The number of cells (i.e., states), allocated per column.
-      'cellsPerColumn': 32,
-
-      'inputWidth': 2048,
-
-      'seed': 1960,
-
-      # Temporal Pooler implementation selector (see _getTPClass in
-      # CLARegion.py).
-      'temporalImp': 'cpp',
-
-      # New Synapse formation count
-      # NOTE: If None, use spNumActivePerInhArea
-      #
-      # TODO: need better explanation
-      'newSynapseCount': 20,
-
-      # Maximum number of synapses per segment
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSynapsesPerSegment': 32,
-
-      # Maximum number of segments per cell
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSegmentsPerCell': 128,
-
-      # Initial Permanence
-      # TODO: need better explanation
-      'initialPerm': 0.21,
-
-      # Permanence Increment
-      'permanenceInc': 0.1,
-
-      # Permanence Decrement
-      # If set to None, will automatically default to tpPermanenceInc
-      # value.
-      'permanenceDec' : 0.1,
-
-      'globalDecay': 0.0,
-
-      'maxAge': 0,
-
-      # Minimum number of active synapses for a segment to be considered
-      # during search for the best-matching segments.
-      # None=use default
-      # Replaces: tpMinThreshold
-      'minThreshold': 12,
-
-      # Segment activation threshold.
-      # A segment is active if it has >= tpSegmentActivationThreshold
-      # connected synapses that are active due to infActiveState
-      # None=use default
-      # Replaces: tpActivationThreshold
-      'activationThreshold': 16,
-
-      'outputType': 'normal',
-
-      # "Pay Attention Mode" length. This tells the TP how many new
-      # elements to append to the end of a learned sequence at a time.
-      # Smaller values are better for datasets with short sequences,
-      # higher values are better for datasets with long sequences.
-      'pamLength': 1,
-    },
-
-    'clParams': {
-      # Classifier implementation selection.
-      'implementation': 'py',
-
-      'regionName' : 'SDRClassifierRegion',
-
-      # Classifier diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      'verbosity' : 0,
-
-      # This controls how fast the classifier learns/forgets. Higher values
-      # make it adapt faster and forget older patterns faster.
-      'alpha': 0.001,
-
-      # This is set after the call to updateConfigFromSubConfig and is
-      # computed from the aggregationInfo and predictAheadTime.
-      'steps': '1',
-
-
-    },
-
-    'trainSPNetOnlyIfRequested': False,
-  },
-
-
-}
-# end of config dictionary
-
-
-# Adjust base config dictionary for any modifications if imported from a
-# sub-experiment
-updateConfigFromSubConfig(config)
-
-
-# Compute predictionSteps based on the predictAheadTime and the aggregation
-# period, which may be permuted over.
-if config['predictAheadTime'] is not None:
-  predictionSteps = int(round(aggregationDivide(
-    config['predictAheadTime'], config['aggregationInfo'])))
-  assert (predictionSteps >= 1)
-  config['modelParams']['clParams']['steps'] = str(predictionSteps)
-
-
-# Adjust config by applying ValueGetterBase-derived
-# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
-# to support value-getter-based substitutions from the sub-experiment (if any)
-applyValueGettersToContainer(config)
-
-
-
-# [optional] A sequence of one or more tasks that describe what to do with the
-# model. Each task consists of a task label, an input spec., iteration count,
-# and a task-control spec per opfTaskSchema.json
-#
-# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
-# Clients that interact with OPFExperiment directly do not make use of
-# the tasks specification.
-#
-control = dict(
-  environment='opfExperiment',
-
-tasks = [
-  {
-    # Task label; this label string may be used for diagnostic logging and for
-    # constructing filenames or directory pathnames for task-specific files, etc.
-    'taskLabel' : "Anomaly",
-
-    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
-    #
-    'dataset' : {
-      'info': 'test_NoProviders',
-      'version': 1,
-
-      'streams': [
-        {
-          'columns': ['*'],
-          'info': 'my simple dataset',
-          'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
-        }
-      ],
-
-      # TODO: Aggregation is not supported yet by run_opf_experiment.py
-      #'aggregation' : config['aggregationInfo']
-    },
-
-    # Iteration count: maximum number of iterations. Each iteration corresponds
-    # to one record from the (possibly aggregated) dataset. The task is
-    # terminated when either number of iterations reaches iterationCount or
-    # all records in the (possibly aggregated) database have been processed,
-    # whichever occurs first.
-    #
-    # iterationCount of -1 = iterate over the entire dataset
-    'iterationCount' : -1,
-
-
-    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
-    'taskControl' : {
-
-      # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
-      # instances.
-      'iterationCycle' : [
-        #IterationPhaseSpecLearnOnly(1000),
-        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
-        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
-      ],
-
-      'metrics' : [
-      ],
-
-      # Logged Metrics: A sequence of regular expressions that specify which of
-      # the metrics from the Inference Specifications section MUST be logged for
-      # every prediction. The regex's correspond to the automatically generated
-      # metric labels. This is similar to the way the optimization metric is
-      # specified in permutations.py.
-      'loggedMetrics': ['.*nupicScore.*'],
-
-
-      # Callbacks for experimentation/research (optional)
-      'callbacks' : {
-        # Callbacks to be called at the beginning of a task, before model iterations.
-        # Signature: callback(); returns nothing
-#        'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
-#        'setup' : [htmPredictionModelControlDisableTPLearningCb],
-        'setup' : [],
-
-        # Callbacks to be called after every learning/inference iteration
-        # Signature: callback(); returns nothing
-        'postIter' : [],
-
-        # Callbacks to be called when the experiment task is finished
-        # Signature: callback(); returns nothing
-        'finish' : []
-      }
-    } # End of taskControl
-  }, # End of task
-]
-
-)
-
-
-
-descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
-                                                control=control)

From b1288499bebeb7b104c2f27e4806811f1650a157 Mon Sep 17 00:00:00 2001
From: Matthew Taylor
Date: Fri, 14 Apr 2017 11:19:47 -0700
Subject: [PATCH 2/2] Deleted several examples:

 examples/opf/experiments/anomaly/temporal/noisy_saw/UNDER_DEVELOPMENT
 examples/opf/experiments/anomaly/temporal/noisy_saw/description.py
 examples/opf/experiments/anomaly/temporal/saw_200_category/UNDER_DEVELOPMENT
 examples/opf/experiments/anomaly/temporal/saw_200_category/data.csv
 examples/opf/experiments/anomaly/temporal/saw_200_category/description.py
 examples/opf/experiments/classification/category_SP_0/UNDER_DEVELOPMENT
 examples/opf/experiments/classification/category_SP_0/description.py
 examples/opf/experiments/classification/category_SP_1/UNDER_DEVELOPMENT
 examples/opf/experiments/classification/category_SP_1/description.py
---
 .../temporal/noisy_saw/UNDER_DEVELOPMENT          | 423 ------------------
 .../anomaly/temporal/noisy_saw/description.py     | 377 ----------------
 .../saw_200_category/UNDER_DEVELOPMENT            | 423 ------------------
 .../temporal/saw_200_category/data.csv            |   1 -
 .../temporal/saw_200_category/description.py      | 417 -----------------
 .../category_SP_0/UNDER_DEVELOPMENT               |   0
 .../category_SP_0/description.py                  |  40 --
 .../category_SP_1/UNDER_DEVELOPMENT               |   0
 .../category_SP_1/description.py                  |  40 --
 9 files changed, 1721 deletions(-)
 delete mode 100644 examples/opf/experiments/anomaly/temporal/noisy_saw/UNDER_DEVELOPMENT
 delete mode 100644 examples/opf/experiments/anomaly/temporal/noisy_saw/description.py
 delete mode 100644 examples/opf/experiments/anomaly/temporal/saw_200_category/UNDER_DEVELOPMENT
 delete mode 100644 examples/opf/experiments/anomaly/temporal/saw_200_category/data.csv
 delete mode 100644 examples/opf/experiments/anomaly/temporal/saw_200_category/description.py
 delete mode 100644 examples/opf/experiments/classification/category_SP_0/UNDER_DEVELOPMENT
 delete mode 100644 examples/opf/experiments/classification/category_SP_0/description.py
 delete mode 100644 examples/opf/experiments/classification/category_SP_1/UNDER_DEVELOPMENT
 delete mode 100644 examples/opf/experiments/classification/category_SP_1/description.py

diff --git a/examples/opf/experiments/anomaly/temporal/noisy_saw/UNDER_DEVELOPMENT b/examples/opf/experiments/anomaly/temporal/noisy_saw/UNDER_DEVELOPMENT
deleted file mode 100644
index 95a2f9c2f1..0000000000
--- a/examples/opf/experiments/anomaly/temporal/noisy_saw/UNDER_DEVELOPMENT
+++ /dev/null
@@ -1,423 +0,0 @@
-# ----------------------------------------------------------------------
-# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
-# with Numenta, Inc., for a separate license for this software code, the
-# following terms and conditions apply:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero Public License version 3 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero Public License for more details.
-#
-# You should have received a copy of the GNU Affero Public License
-# along with this program. If not, see http://www.gnu.org/licenses.
-#
-# http://numenta.org/licenses/
-# ----------------------------------------------------------------------
-
-"""
-Template file used by the OPF Experiment Generator to generate the actual
-description.py file by replacing $XXXXXXXX tokens with desired values.
-
-This description.py file was generated by:
-'~/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
-"""
-
-from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
-
-from nupic.frameworks.opf.expdescriptionhelpers import (
-  updateConfigFromSubConfig,
-  applyValueGettersToContainer,
-  DeferredDictLookup)
-
-from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
-from nupic.frameworks.opf.metrics import MetricSpec
-from nupic.frameworks.opf.opfutils import (InferenceType,
-                                           InferenceElement)
-from nupic.support import aggregationDivide
-
-from nupic.frameworks.opf.opftaskdriver import (
-  IterationPhaseSpecLearnOnly,
-  IterationPhaseSpecInferOnly,
-  IterationPhaseSpecLearnAndInfer)
-
-
-# Model Configuration Dictionary:
-#
-# Define the model parameters and adjust for any modifications if imported
-# from a sub-experiment.
-#
-# These fields might be modified by a sub-experiment; this dict is passed
-# between the sub-experiment and base experiment
-#
-#
-# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
-# within the config dictionary may be assigned futures derived from the
-# ValueGetterBase class, such as DeferredDictLookup.
-# This facility is particularly handy for enabling substitution of values in
-# the config dictionary from other values in the config dictionary, which is
-# needed by permutation.py-based experiments. These values will be resolved
-# during the call to applyValueGettersToContainer(),
-# which we call after the base experiment's config dictionary is updated from
-# the sub-experiment. See ValueGetterBase and
-# DeferredDictLookup for more details about value-getters.
-#
-# For each custom encoder parameter to be exposed to the sub-experiment/
-# permutation overrides, define a variable in this section, using key names
-# beginning with a single underscore character to avoid collisions with
-# pre-defined keys (e.g., _dsEncoderFieldName2_N).
-#
-# Example:
-#   config = dict(
-#     _dsEncoderFieldName2_N = 70,
-#     _dsEncoderFieldName2_W = 5,
-#     dsEncoderSchema = [
-#       base=dict(
-#         fieldname='Name2', type='ScalarEncoder',
-#         name='Name2', minval=0, maxval=270, clipInput=True,
-#         n=DeferredDictLookup('_dsEncoderFieldName2_N'),
-#         w=DeferredDictLookup('_dsEncoderFieldName2_W')),
-#     ],
-#   )
-#   updateConfigFromSubConfig(config)
-#   applyValueGettersToContainer(config)
-config = {
-  # Type of model that the rest of these parameters apply to.
-  'model': "HTMPrediction",
-
-  # Version that specifies the format of the config.
-  'version': 1,
-
-  # Intermediate variables used to compute fields in modelParams and also
-  # referenced from the control section.
-  'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
-                                   ('numericFieldNameB', 'sum'),
-                                   ('categoryFieldNameC', 'first')],
-                       'hours': 0},
-
-  'predictAheadTime': None,
-
-  # Model parameter dictionary.
-  'modelParams': {
-    # The type of inference that this model will perform
-    'inferenceType': 'NontemporalAnomaly',
-
-    'sensorParams': {
-      # Sensor diagnostic output verbosity control;
-      # if > 0: sensor region will print out on screen what it's sensing
-      # at each step 0: silent; >=1: some info; >=2: more info;
-      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
-      'verbosity' : 0,
-
-      # Example:
-      #   dsEncoderSchema = [
-      #     DeferredDictLookup('__field_name_encoder'),
-      #   ],
-      #
-      # (value generated from DS_ENCODER_SCHEMA)
-      'encoders': {
-        'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
-        'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
-        'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
-        'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
-        'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
-        'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
-        'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
-        'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
-        'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
-        'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
-      },
-
-      # A dictionary specifying the period for automatically-generated
-      # resets from a RecordSensor;
-      #
-      # None = disable automatically-generated resets (also disabled if
-      # all of the specified values evaluate to 0).
-      # Valid keys is the desired combination of the following:
-      #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
-      #
-      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
-      #
-      # (value generated from SENSOR_AUTO_RESET)
-      'sensorAutoReset' : None,
-    },
-
-    'spEnable': True,
-
-    'spParams': {
-      # SP diagnostic output verbosity control;
-      # 0: silent; >=1: some info; >=2: more info;
-      'spVerbosity' : 0,
-
-      'globalInhibition': 1,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      'inputWidth': 0,
-
-      # SP inhibition control (absolute value);
-      # Maximum number of active columns in the SP region's output (when
-      # there are more, the weaker ones are suppressed)
-      'numActiveColumnsPerInhArea': 40,
-
-      'seed': 1956,
-
-      # potentialPct
-      # What percent of the columns's receptive field is available
-      # for potential synapses. At initialization time, we will
-      # choose potentialPct * (2*potentialRadius+1)^2
-      'potentialPct': 0.5,
-
-      # The default connected threshold. Any synapse whose
-      # permanence value is above the connected threshold is
-      # a "connected synapse", meaning it can contribute to the
-      # cell's firing. Typical value is 0.10. Cells whose activity
-      # level before inhibition falls below minDutyCycleBeforeInh
-      # will have their own internal synPermConnectedCell
-      # threshold set below this default value.
-      # (This concept applies to both SP and TP and so 'cells'
-      # is correct here as opposed to 'columns')
-      'synPermConnected': 0.1,
-
-      'synPermActiveInc': 0.1,
-
-      'synPermInactiveDec': 0.01,
-    },
-
-    # Controls whether TP is enabled or disabled;
-    # TP is necessary for making temporal predictions, such as predicting
-    # the next inputs. Without TP, the model is only capable of
-    # reconstructing missing sensor inputs (via SP).
-    'tmEnable' : True,
-
-    'tmParams': {
-      # TP diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
-      'verbosity': 0,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      # The number of cells (i.e., states), allocated per column.
-      'cellsPerColumn': 32,
-
-      'inputWidth': 2048,
-
-      'seed': 1960,
-
-      # Temporal Pooler implementation selector (see _getTPClass in
-      # CLARegion.py).
-      'temporalImp': 'cpp',
-
-      # New Synapse formation count
-      # NOTE: If None, use spNumActivePerInhArea
-      #
-      # TODO: need better explanation
-      'newSynapseCount': 20,
-
-      # Maximum number of synapses per segment
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSynapsesPerSegment': 32,
-
-      # Maximum number of segments per cell
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSegmentsPerCell': 128,
-
-      # Initial Permanence
-      # TODO: need better explanation
-      'initialPerm': 0.21,
-
-      # Permanence Increment
-      'permanenceInc': 0.1,
-
-      # Permanence Decrement
-      # If set to None, will automatically default to tpPermanenceInc
-      # value.
-      'permanenceDec' : 0.1,
-
-      'globalDecay': 0.0,
-
-      'maxAge': 0,
-
-      # Minimum number of active synapses for a segment to be considered
-      # during search for the best-matching segments.
-      # None=use default
-      # Replaces: tpMinThreshold
-      'minThreshold': 12,
-
-      # Segment activation threshold.
-      # A segment is active if it has >= tpSegmentActivationThreshold
-      # connected synapses that are active due to infActiveState
-      # None=use default
-      # Replaces: tpActivationThreshold
-      'activationThreshold': 16,
-
-      'outputType': 'normal',
-
-      # "Pay Attention Mode" length. This tells the TP how many new
-      # elements to append to the end of a learned sequence at a time.
-      # Smaller values are better for datasets with short sequences,
-      # higher values are better for datasets with long sequences.
-      'pamLength': 1,
-    },
-
-    'clParams': {
-      'regionName' : 'SDRClassifierRegion',
-
-      # Classifier diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      'verbosity' : 0,
-
-      # This controls how fast the classifier learns/forgets. Higher values
-      # make it adapt faster and forget older patterns faster.
-      'alpha': 0.001,
-
-      # This is set after the call to updateConfigFromSubConfig and is
-      # computed from the aggregationInfo and predictAheadTime.
-      'steps': '1',
-
-
-    },
-
-    'trainSPNetOnlyIfRequested': False,
-  },
-
-
-}
-# end of config dictionary
-
-
-# Adjust base config dictionary for any modifications if imported from a
-# sub-experiment
-updateConfigFromSubConfig(config)
-
-
-# Compute predictionSteps based on the predictAheadTime and the aggregation
-# period, which may be permuted over.
-if config['predictAheadTime'] is not None:
-  predictionSteps = int(round(aggregationDivide(
-    config['predictAheadTime'], config['aggregationInfo'])))
-  assert (predictionSteps >= 1)
-  config['modelParams']['clParams']['steps'] = str(predictionSteps)
-
-
-# Adjust config by applying ValueGetterBase-derived
-# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
-# to support value-getter-based substitutions from the sub-experiment (if any)
-applyValueGettersToContainer(config)
-
-
-
-# [optional] A sequence of one or more tasks that describe what to do with the
-# model. Each task consists of a task label, an input spec., iteration count,
-# and a task-control spec per opfTaskSchema.json
-#
-# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
-# Clients that interact with OPFExperiment directly do not make use of
-# the tasks specification.
-#
-control = dict(
-  environment='opfExperiment',
-
-tasks = [
-  {
-    # Task label; this label string may be used for diagnostic logging and for
-    # constructing filenames or directory pathnames for task-specific files, etc.
-    'taskLabel' : "Anomaly",
-
-    # Input stream specification per py/nupic/cluster/database/StreamDef.json.
-    #
-    'dataset' : {
-      'info': 'test_NoProviders',
-      'version': 1,
-
-      'streams': [
-        {
-          'columns': ['*'],
-          'info': 'my simple dataset',
-          'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
-        }
-      ],
-
-      # TODO: Aggregation is not supported yet by run_opf_experiment.py
-      #'aggregation' : config['aggregationInfo']
-    },
-
-    # Iteration count: maximum number of iterations. Each iteration corresponds
-    # to one record from the (possibly aggregated) dataset. The task is
-    # terminated when either number of iterations reaches iterationCount or
-    # all records in the (possibly aggregated) database have been processed,
-    # whichever occurs first.
-    #
-    # iterationCount of -1 = iterate over the entire dataset
-    'iterationCount' : -1,
-
-
-    # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
-    'taskControl' : {
-
-      # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
-      # instances.
-      'iterationCycle' : [
-        #IterationPhaseSpecLearnOnly(1000),
-        IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
-        #IterationPhaseSpecInferOnly(10, inferenceArgs=None),
-      ],
-
-      'metrics' : [
-      ],
-
-      # Logged Metrics: A sequence of regular expressions that specify which of
-      # the metrics from the Inference Specifications section MUST be logged for
-      # every prediction. The regex's correspond to the automatically generated
-      # metric labels. This is similar to the way the optimization metric is
-      # specified in permutations.py.
-      'loggedMetrics': ['.*nupicScore.*'],
-
-
-      # Callbacks for experimentation/research (optional)
-      'callbacks' : {
-        # Callbacks to be called at the beginning of a task, before model iterations.
-        # Signature: callback(); returns nothing
-#        'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
-#        'setup' : [htmPredictionModelControlDisableTPLearningCb],
-        'setup' : [],
-
-        # Callbacks to be called after every learning/inference iteration
-        # Signature: callback(); returns nothing
-        'postIter' : [],
-
-        # Callbacks to be called when the experiment task is finished
-        # Signature: callback(); returns nothing
-        'finish' : []
-      }
-    } # End of taskControl
-  }, # End of task
-]
-
-)
-
-
-
-descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
-                                                control=control)
diff --git a/examples/opf/experiments/anomaly/temporal/noisy_saw/description.py b/examples/opf/experiments/anomaly/temporal/noisy_saw/description.py
deleted file mode 100644
index f6e7c2cfbd..0000000000
--- a/examples/opf/experiments/anomaly/temporal/noisy_saw/description.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# ----------------------------------------------------------------------
-# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
-# with Numenta, Inc., for a separate license for this software code, the
-# following terms and conditions apply:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero Public License version 3 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero Public License for more details.
-#
-# You should have received a copy of the GNU Affero Public License
-# along with this program. If not, see http://www.gnu.org/licenses.
-#
-# http://numenta.org/licenses/
-# ----------------------------------------------------------------------
-
-"""
-Template file used by the OPF Experiment Generator to generate the actual
-description.py file by replacing $XXXXXXXX tokens with desired values.
-
-This description.py file was generated by:
-'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
-"""
-
-from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
-
-from nupic.frameworks.opf.expdescriptionhelpers import (
-  updateConfigFromSubConfig,
-  applyValueGettersToContainer,
-  DeferredDictLookup)
-
-from nupic.frameworks.opf.htmpredictionmodelcallbacks import *
-from nupic.frameworks.opf.metrics import MetricSpec
-from nupic.frameworks.opf.opfutils import (InferenceType,
-                                           InferenceElement)
-from nupic.support import aggregationDivide
-
-from nupic.frameworks.opf.opftaskdriver import (
-  IterationPhaseSpecLearnOnly,
-  IterationPhaseSpecInferOnly,
-  IterationPhaseSpecLearnAndInfer)
-
-
-# Model Configuration Dictionary:
-#
-# Define the model parameters and adjust for any modifications if imported
-# from a sub-experiment.
-#
-# These fields might be modified by a sub-experiment; this dict is passed
-# between the sub-experiment and base experiment
-#
-#
-# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
-# within the config dictionary may be assigned futures derived from the
-# ValueGetterBase class, such as DeferredDictLookup.
-# This facility is particularly handy for enabling substitution of values in
-# the config dictionary from other values in the config dictionary, which is
-# needed by permutation.py-based experiments. These values will be resolved
-# during the call to applyValueGettersToContainer(),
-# which we call after the base experiment's config dictionary is updated from
-# the sub-experiment. See ValueGetterBase and
-# DeferredDictLookup for more details about value-getters.
-#
-# For each custom encoder parameter to be exposed to the sub-experiment/
-# permutation overrides, define a variable in this section, using key names
-# beginning with a single underscore character to avoid collisions with
-# pre-defined keys (e.g., _dsEncoderFieldName2_N).
-#
-# Example:
-#   config = dict(
-#     _dsEncoderFieldName2_N = 70,
-#     _dsEncoderFieldName2_W = 5,
-#     dsEncoderSchema = [
-#       base=dict(
-#         fieldname='Name2', type='ScalarEncoder',
-#         name='Name2', minval=0, maxval=270, clipInput=True,
-#         n=DeferredDictLookup('_dsEncoderFieldName2_N'),
-#         w=DeferredDictLookup('_dsEncoderFieldName2_W')),
-#     ],
-#   )
-#   updateConfigFromSubConfig(config)
-#   applyValueGettersToContainer(config)
-config = {
-  # Type of model that the rest of these parameters apply to.
-  'model': "HTMPrediction",
-
-  # Version that specifies the format of the config.
-  'version': 1,
-
-  # Intermediate variables used to compute fields in modelParams and also
-  # referenced from the control section.
-  'aggregationInfo': { 'days': 0,
-                       'fields': [],
-                       'hours': 0,
-                       'microseconds': 0,
-                       'milliseconds': 0,
-                       'minutes': 0,
-                       'months': 0,
-                       'seconds': 0,
-                       'weeks': 0,
-                       'years': 0},
-
-  'predictAheadTime': None,
-
-  # Model parameter dictionary.
-  'modelParams': {
-    # The type of inference that this model will perform
-    'inferenceType': 'TemporalAnomaly',
-
-    'sensorParams': {
-      # Sensor diagnostic output verbosity control;
-      # if > 0: sensor region will print out on screen what it's sensing
-      # at each step 0: silent; >=1: some info; >=2: more info;
-      # >=3: even more info (see compute() in py/regions/RecordSensor.py)
-      'verbosity' : 0,
-
-      # Example:
-      #   dsEncoderSchema = [
-      #     DeferredDictLookup('__field_name_encoder'),
-      #   ],
-      #
-      # (value generated from DS_ENCODER_SCHEMA)
-      'encoders': { 'f': { 'clipInput': True,
-                           'fieldname': u'f',
-                           'maxval': 520,
-                           'minval': 0,
-                           'n': 500,
-                           'name': u'f',
-                           'type': 'ScalarEncoder',
-                           'w': 21}},
-
-      # A dictionary specifying the period for automatically-generated
-      # resets from a RecordSensor;
-      #
-      # None = disable automatically-generated resets (also disabled if
-      # all of the specified values evaluate to 0).
-      # Valid keys is the desired combination of the following:
-      #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
-      #
-      # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
-      #
-      # (value generated from SENSOR_AUTO_RESET)
-      'sensorAutoReset' : None,
-    },
-
-    'spEnable': True,
-
-    'spParams': {
-      # SP diagnostic output verbosity control;
-      # 0: silent; >=1: some info; >=2: more info;
-      'spVerbosity' : 0,
-
-      'globalInhibition': 1,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      'inputWidth': 0,
-
-      # SP inhibition control (absolute value);
-      # Maximum number of active columns in the SP region's output (when
-      # there are more, the weaker ones are suppressed)
-      'numActiveColumnsPerInhArea': 40,
-
-      'seed': 1956,
-
-      # potentialPct
-      # What percent of the columns's receptive field is available
-      # for potential synapses. At initialization time, we will
-      # choose potentialPct * (2*potentialRadius+1)^2
-      'potentialPct': 0.5,
-
-      # The default connected threshold. Any synapse whose
-      # permanence value is above the connected threshold is
-      # a "connected synapse", meaning it can contribute to the
-      # cell's firing. Typical value is 0.10. Cells whose activity
-      # level before inhibition falls below minDutyCycleBeforeInh
-      # will have their own internal synPermConnectedCell
-      # threshold set below this default value.
-      # (This concept applies to both SP and TP and so 'cells'
-      # is correct here as opposed to 'columns')
-      'synPermConnected': 0.1,
-
-      'synPermActiveInc': 0.1,
-
-      'synPermInactiveDec': 0.01,
-    },
-
-    # Controls whether TP is enabled or disabled;
-    # TP is necessary for making temporal predictions, such as predicting
-    # the next inputs. Without TP, the model is only capable of
-    # reconstructing missing sensor inputs (via SP).
-    'tmEnable' : True,
-
-    'tmParams': {
-      # TP diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
-      'verbosity': 0,
-
-      # Number of cell columns in the cortical region (same number for
-      # SP and TP)
-      # (see also tpNCellsPerCol)
-      'columnCount': 2048,
-
-      # The number of cells (i.e., states), allocated per column.
-      'cellsPerColumn': 32,
-
-      'inputWidth': 2048,
-
-      'seed': 1960,
-
-      # Temporal Pooler implementation selector (see _getTPClass in
-      # CLARegion.py).
-      'temporalImp': 'cpp',
-
-      # New Synapse formation count
-      # NOTE: If None, use spNumActivePerInhArea
-      #
-      # TODO: need better explanation
-      'newSynapseCount': 20,
-
-      # Maximum number of synapses per segment
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSynapsesPerSegment': 32,
-
-      # Maximum number of segments per cell
-      #  > 0 for fixed-size CLA
-      # -1 for non-fixed-size CLA
-      #
-      # TODO: for Ron: once the appropriate value is placed in TP
-      # constructor, see if we should eliminate this parameter from
-      # description.py.
-      'maxSegmentsPerCell': 128,
-
-      # Initial Permanence
-      # TODO: need better explanation
-      'initialPerm': 0.21,
-
-      # Permanence Increment
-      'permanenceInc': 0.1,
-
-      # Permanence Decrement
-      # If set to None, will automatically default to tpPermanenceInc
-      # value.
-      'permanenceDec' : 0.1,
-
-      'globalDecay': 0.0,
-
-      'maxAge': 0,
-
-      # Minimum number of active synapses for a segment to be considered
-      # during search for the best-matching segments.
-      # None=use default
-      # Replaces: tpMinThreshold
-      'minThreshold': 12,
-
-      # Segment activation threshold.
-      # A segment is active if it has >= tpSegmentActivationThreshold
-      # connected synapses that are active due to infActiveState
-      # None=use default
-      # Replaces: tpActivationThreshold
-      'activationThreshold': 16,
-
-      'outputType': 'normal',
-
-      # "Pay Attention Mode" length. This tells the TP how many new
-      # elements to append to the end of a learned sequence at a time.
-      # Smaller values are better for datasets with short sequences,
-      # higher values are better for datasets with long sequences.
-      'pamLength': 1,
-    },
-
-    'clParams': {
-      # Classifier implementation selection.
-      'implementation': 'py',
-
-      'regionName' : 'SDRClassifierRegion',
-
-      # Classifier diagnostic output verbosity control;
-      # 0: silent; [1..6]: increasing levels of verbosity
-      'verbosity' : 0,
-
-      # This controls how fast the classifier learns/forgets. Higher values
-      # make it adapt faster and forget older patterns faster.
- 'alpha': 0.0001, - - # This is set after the call to updateConfigFromSubConfig and is - # computed from the aggregationInfo and predictAheadTime. - 'steps': '1', - - - }, - - 'trainSPNetOnlyIfRequested': False, - }, - - -} -# end of config dictionary - - -# Adjust base config dictionary for any modifications if imported from a -# sub-experiment -updateConfigFromSubConfig(config) - - -# Compute predictionSteps based on the predictAheadTime and the aggregation -# period, which may be permuted over. -if config['predictAheadTime'] is not None: - predictionSteps = int(round(aggregationDivide( - config['predictAheadTime'], config['aggregationInfo']))) - assert (predictionSteps >= 1) - config['modelParams']['clParams']['steps'] = str(predictionSteps) - - -# Adjust config by applying ValueGetterBase-derived -# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order -# to support value-getter-based substitutions from the sub-experiment (if any) -applyValueGettersToContainer(config) -control = { - # The environment that the current model is being run in - "environment": 'nupic', - - # Input stream specification per py/nupic/cluster/database/StreamDef.json. - # - 'dataset' : { u'info': u'cerebro_dummy', - u'streams': [ { u'columns': [u'*'], - u'info': u'test data', - u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'), - } - ], - u'version': 1}, - - # Iteration count: maximum number of iterations. Each iteration corresponds - # to one record from the (possibly aggregated) dataset. The task is - # terminated when either number of iterations reaches iterationCount or - # all records in the (possibly aggregated) database have been processed, - # whichever occurs first. - # - # iterationCount of -1 = iterate over the entire dataset - 'iterationCount' : -1, - - - # A dictionary containing all the supplementary parameters for inference - "inferenceArgs":None, - - # Metrics: A list of MetricSpecs that instantiate the metrics that are - # computed for this experiment - 'metrics':[ - MetricSpec(field=u'f', metric='aae', inferenceElement='prediction', params={'window': 1000}), - ], - - # Logged Metrics: A sequence of regular expressions that specify which of - # the metrics from the Inference Specifications section MUST be logged for - # every prediction. The regex's correspond to the automatically generated - # metric labels. This is similar to the way the optimization metric is - # specified in permutations.py. - 'loggedMetrics': ['.*nupicScore.*'], -} - - - -descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, - control=control) diff --git a/examples/opf/experiments/anomaly/temporal/saw_200_category/UNDER_DEVELOPMENT b/examples/opf/experiments/anomaly/temporal/saw_200_category/UNDER_DEVELOPMENT deleted file mode 100644 index 95a2f9c2f1..0000000000 --- a/examples/opf/experiments/anomaly/temporal/saw_200_category/UNDER_DEVELOPMENT +++ /dev/null @@ -1,423 +0,0 @@ -# ---------------------------------------------------------------------- -# Numenta Platform for Intelligent Computing (NuPIC) -# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement -# with Numenta, Inc., for a separate license for this software code, the -# following terms and conditions apply: -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero Public License version 3 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU Affero Public License for more details. -# -# You should have received a copy of the GNU Affero Public License -# along with this program. If not, see http://www.gnu.org/licenses. -# -# http://numenta.org/licenses/ -# ---------------------------------------------------------------------- - -""" -Template file used by the OPF Experiment Generator to generate the actual -description.py file by replacing $XXXXXXXX tokens with desired values. - -This description.py file was generated by: -'~/nta/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' -""" - -from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI - -from nupic.frameworks.opf.expdescriptionhelpers import ( - updateConfigFromSubConfig, - applyValueGettersToContainer, - DeferredDictLookup) - -from nupic.frameworks.opf.htmpredictionmodelcallbacks import * -from nupic.frameworks.opf.metrics import MetricSpec -from nupic.frameworks.opf.opfutils import (InferenceType, - InferenceElement) -from nupic.support import aggregationDivide - -from nupic.frameworks.opf.opftaskdriver import ( - IterationPhaseSpecLearnOnly, - IterationPhaseSpecInferOnly, - IterationPhaseSpecLearnAndInfer) - - -# Model Configuration Dictionary: -# -# Define the model parameters and adjust for any modifications if imported -# from a sub-experiment. -# -# These fields might be modified by a sub-experiment; this dict is passed -# between the sub-experiment and base experiment -# -# -# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements -# within the config dictionary may be assigned futures derived from the -# ValueGetterBase class, such as DeferredDictLookup. -# This facility is particularly handy for enabling substitution of values in -# the config dictionary from other values in the config dictionary, which is -# needed by permutation.py-based experiments. These values will be resolved -# during the call to applyValueGettersToContainer(), -# which we call after the base experiment's config dictionary is updated from -# the sub-experiment. See ValueGetterBase and -# DeferredDictLookup for more details about value-getters. -# -# For each custom encoder parameter to be exposed to the sub-experiment/ -# permutation overrides, define a variable in this section, using key names -# beginning with a single underscore character to avoid collisions with -# pre-defined keys (e.g., _dsEncoderFieldName2_N). -# -# Example: -# config = dict( -# _dsEncoderFieldName2_N = 70, -# _dsEncoderFieldName2_W = 5, -# dsEncoderSchema = [ -# base=dict( -# fieldname='Name2', type='ScalarEncoder', -# name='Name2', minval=0, maxval=270, clipInput=True, -# n=DeferredDictLookup('_dsEncoderFieldName2_N'), -# w=DeferredDictLookup('_dsEncoderFieldName2_W')), -# ], -# ) -# updateConfigFromSubConfig(config) -# applyValueGettersToContainer(config) -config = { - # Type of model that the rest of these parameters apply to. - 'model': "HTMPrediction", - - # Version that specifies the format of the config. - 'version': 1, - - # Intermediate variables used to compute fields in modelParams and also - # referenced from the control section. 
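(Hedged illustration, not from the original file: in plain Python, the per-field
operators named in the aggregationInfo dict below reduce all records that fall
into one aggregation window roughly like this; the field names are the config's
own placeholders.)

    # Hypothetical records landing in a single aggregation window:
    window = [
        {'numericFieldNameA': 2.0, 'numericFieldNameB': 10, 'categoryFieldNameC': 'x'},
        {'numericFieldNameA': 4.0, 'numericFieldNameB': 30, 'categoryFieldNameC': 'y'},
    ]

    aggregated = {
        # 'mean'  -> 3.0
        'numericFieldNameA': sum(r['numericFieldNameA'] for r in window) / len(window),
        # 'sum'   -> 40
        'numericFieldNameB': sum(r['numericFieldNameB'] for r in window),
        # 'first' -> 'x'
        'categoryFieldNameC': window[0]['categoryFieldNameC'],
    }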
- 'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'), - ('numericFieldNameB', 'sum'), - ('categoryFieldNameC', 'first')], - 'hours': 0}, - - 'predictAheadTime': None, - - # Model parameter dictionary. - 'modelParams': { - # The type of inference that this model will perform - 'inferenceType': 'NontemporalAnomaly', - - 'sensorParams': { - # Sensor diagnostic output verbosity control; - # if > 0: sensor region will print out on screen what it's sensing - # at each step 0: silent; >=1: some info; >=2: more info; - # >=3: even more info (see compute() in py/regions/RecordSensor.py) - 'verbosity' : 0, - - # Example: - # dsEncoderSchema = [ - # DeferredDictLookup('__field_name_encoder'), - # ], - # - # (value generated from DS_ENCODER_SCHEMA) - 'encoders': { - 'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21), - 'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21), - 'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21), - 'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21), - 'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21), - 'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21), - 'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21), - 'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21), - 'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21), - 'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21), - }, - - # A dictionary specifying the period for automatically-generated - # resets from a RecordSensor; - # - # None = disable automatically-generated resets (also disabled if - # all of the specified values evaluate to 0). - # Valid keys is the desired combination of the following: - # days, hours, minutes, seconds, milliseconds, microseconds, weeks - # - # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), - # - # (value generated from SENSOR_AUTO_RESET) - 'sensorAutoReset' : None, - }, - - 'spEnable': True, - - 'spParams': { - # SP diagnostic output verbosity control; - # 0: silent; >=1: some info; >=2: more info; - 'spVerbosity' : 0, - - 'globalInhibition': 1, - - # Number of cell columns in the cortical region (same number for - # SP and TP) - # (see also tpNCellsPerCol) - 'columnCount': 2048, - - 'inputWidth': 0, - - # SP inhibition control (absolute value); - # Maximum number of active columns in the SP region's output (when - # there are more, the weaker ones are suppressed) - 'numActiveColumnsPerInhArea': 40, - - 'seed': 1956, - - # potentialPct - # What percent of the columns's receptive field is available - # for potential synapses. At initialization time, we will - # choose potentialPct * (2*potentialRadius+1)^2 - 'potentialPct': 0.5, - - # The default connected threshold. Any synapse whose - # permanence value is above the connected threshold is - # a "connected synapse", meaning it can contribute to the - # cell's firing. Typical value is 0.10. Cells whose activity - # level before inhibition falls below minDutyCycleBeforeInh - # will have their own internal synPermConnectedCell - # threshold set below this default value. 
- # (This concept applies to both SP and TP and so 'cells' - # is correct here as opposed to 'columns') - 'synPermConnected': 0.1, - - 'synPermActiveInc': 0.1, - - 'synPermInactiveDec': 0.01, - }, - - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of - # reconstructing missing sensor inputs (via SP). - 'tmEnable' : True, - - 'tmParams': { - # TP diagnostic output verbosity control; - # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) - 'verbosity': 0, - - # Number of cell columns in the cortical region (same number for - # SP and TP) - # (see also tpNCellsPerCol) - 'columnCount': 2048, - - # The number of cells (i.e., states), allocated per column. - 'cellsPerColumn': 32, - - 'inputWidth': 2048, - - 'seed': 1960, - - # Temporal Pooler implementation selector (see _getTPClass in - # CLARegion.py). - 'temporalImp': 'cpp', - - # New Synapse formation count - # NOTE: If None, use spNumActivePerInhArea - # - # TODO: need better explanation - 'newSynapseCount': 20, - - # Maximum number of synapses per segment - # > 0 for fixed-size CLA - # -1 for non-fixed-size CLA - # - # TODO: for Ron: once the appropriate value is placed in TP - # constructor, see if we should eliminate this parameter from - # description.py. - 'maxSynapsesPerSegment': 32, - - # Maximum number of segments per cell - # > 0 for fixed-size CLA - # -1 for non-fixed-size CLA - # - # TODO: for Ron: once the appropriate value is placed in TP - # constructor, see if we should eliminate this parameter from - # description.py. - 'maxSegmentsPerCell': 128, - - # Initial Permanence - # TODO: need better explanation - 'initialPerm': 0.21, - - # Permanence Increment - 'permanenceInc': 0.1, - - # Permanence Decrement - # If set to None, will automatically default to tpPermanenceInc - # value. - 'permanenceDec' : 0.1, - - 'globalDecay': 0.0, - - 'maxAge': 0, - - # Minimum number of active synapses for a segment to be considered - # during search for the best-matching segments. - # None=use default - # Replaces: tpMinThreshold - 'minThreshold': 12, - - # Segment activation threshold. - # A segment is active if it has >= tpSegmentActivationThreshold - # connected synapses that are active due to infActiveState - # None=use default - # Replaces: tpActivationThreshold - 'activationThreshold': 16, - - 'outputType': 'normal', - - # "Pay Attention Mode" length. This tells the TP how many new - # elements to append to the end of a learned sequence at a time. - # Smaller values are better for datasets with short sequences, - # higher values are better for datasets with long sequences. - 'pamLength': 1, - }, - - 'clParams': { - 'regionName' : 'SDRClassifierRegion', - - # Classifier diagnostic output verbosity control; - # 0: silent; [1..6]: increasing levels of verbosity - 'verbosity' : 0, - - # This controls how fast the classifier learns/forgets. Higher values - # make it adapt faster and forget older patterns faster. - 'alpha': 0.001, - - # This is set after the call to updateConfigFromSubConfig and is - # computed from the aggregationInfo and predictAheadTime. 
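(A hedged sketch of that computation, using the aggregationDivide helper already
imported at the top of this file; both periods are invented for illustration.)

    from nupic.support import aggregationDivide

    predictAheadTime = {'minutes': 60}  # hypothetical: predict one hour ahead
    aggregationInfo = {'minutes': 15}   # hypothetical: one aggregated record per 15 min

    # Mirrors the predictionSteps computation that follows the config dictionary:
    predictionSteps = int(round(aggregationDivide(predictAheadTime, aggregationInfo)))
    assert predictionSteps == 4         # 60 minutes / 15 minutes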
- 'steps': '1', - - - }, - - 'trainSPNetOnlyIfRequested': False, - }, - - -} -# end of config dictionary - - -# Adjust base config dictionary for any modifications if imported from a -# sub-experiment -updateConfigFromSubConfig(config) - - -# Compute predictionSteps based on the predictAheadTime and the aggregation -# period, which may be permuted over. -if config['predictAheadTime'] is not None: - predictionSteps = int(round(aggregationDivide( - config['predictAheadTime'], config['aggregationInfo']))) - assert (predictionSteps >= 1) - config['modelParams']['clParams']['steps'] = str(predictionSteps) - - -# Adjust config by applying ValueGetterBase-derived -# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order -# to support value-getter-based substitutions from the sub-experiment (if any) -applyValueGettersToContainer(config) - - - -# [optional] A sequence of one or more tasks that describe what to do with the -# model. Each task consists of a task label, an input spec., iteration count, -# and a task-control spec per opfTaskSchema.json -# -# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver. -# Clients that interact with OPFExperiment directly do not make use of -# the tasks specification. -# -control = dict( - environment='opfExperiment', - -tasks = [ - { - # Task label; this label string may be used for diagnostic logging and for - # constructing filenames or directory pathnames for task-specific files, etc. - 'taskLabel' : "Anomaly", - - # Input stream specification per py/nupic/cluster/database/StreamDef.json. - # - 'dataset' : { - 'info': 'test_NoProviders', - 'version': 1, - - 'streams': [ - { - 'columns': ['*'], - 'info': 'my simple dataset', - 'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'), - } - ], - - # TODO: Aggregation is not supported yet by run_opf_experiment.py - #'aggregation' : config['aggregationInfo'] - }, - - # Iteration count: maximum number of iterations. Each iteration corresponds - # to one record from the (possibly aggregated) dataset. The task is - # terminated when either number of iterations reaches iterationCount or - # all records in the (possibly aggregated) database have been processed, - # whichever occurs first. - # - # iterationCount of -1 = iterate over the entire dataset - 'iterationCount' : -1, - - - # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json) - 'taskControl' : { - - # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX - # instances. - 'iterationCycle' : [ - #IterationPhaseSpecLearnOnly(1000), - IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None), - #IterationPhaseSpecInferOnly(10, inferenceArgs=None), - ], - - 'metrics' : [ - ], - - # Logged Metrics: A sequence of regular expressions that specify which of - # the metrics from the Inference Specifications section MUST be logged for - # every prediction. The regex's correspond to the automatically generated - # metric labels. This is similar to the way the optimization metric is - # specified in permutations.py. - 'loggedMetrics': ['.*nupicScore.*'], - - - # Callbacks for experimentation/research (optional) - 'callbacks' : { - # Callbacks to be called at the beginning of a task, before model iterations. 
- # Signature: callback(); returns nothing -# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb], -# 'setup' : [htmPredictionModelControlDisableTPLearningCb], - 'setup' : [], - - # Callbacks to be called after every learning/inference iteration - # Signature: callback(); returns nothing - 'postIter' : [], - - # Callbacks to be called when the experiment task is finished - # Signature: callback(); returns nothing - 'finish' : [] - } - } # End of taskControl - }, # End of task -] - -) - - - -descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, - control=control) diff --git a/examples/opf/experiments/anomaly/temporal/saw_200_category/data.csv b/examples/opf/experiments/anomaly/temporal/saw_200_category/data.csv deleted file mode 100644 index 2bceb36d27..0000000000 --- a/examples/opf/experiments/anomaly/temporal/saw_200_category/data.csv +++ /dev/null @@ -1 +0,0 @@ -noise,f string,string , 0,0 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 9,9 10,10 11,11 12,12 13,13 14,14 15,15 16,16 17,17 18,18 19,19 20,20 21,21 22,22 23,23 24,24 25,25 26,26 27,27 28,28 29,29 30,30 31,31 32,32 33,33 34,34 35,35 36,36 37,37 38,38 39,39 40,40 41,41 42,42 43,43 44,44 45,45 46,46 47,47 48,48 49,49 50,50 51,51 52,52 53,53 54,54 55,55 56,56 57,57 58,58 59,59 60,60 61,61 62,62 63,63 64,64 65,65 66,66 67,67 68,68 69,69 70,70 71,71 72,72 73,73 74,74 75,75 76,76 77,77 78,78 79,79 80,80 81,81 82,82 83,83 84,84 85,85 86,86 87,87 88,88 89,89 90,90 91,91 92,92 93,93 94,94 95,95 96,96 97,97 98,98 99,99 100,100 101,101 102,102 103,103 104,104 105,105 106,106 107,107 108,108 109,109 110,110 111,111 112,112 113,113 114,114 115,115 116,116 117,117 118,118 119,119 120,120 121,121 122,122 123,123 124,124 125,125 126,126 127,127 128,128 129,129 130,130 131,131 132,132 133,133 134,134 135,135 136,136 137,137 138,138 139,139 140,140 141,141 142,142 143,143 144,144 145,145 146,146 147,147 148,148 149,149 150,150 151,151 152,152 153,153 154,154 155,155 156,156 157,157 158,158 159,159 160,160 161,161 162,162 163,163 164,164 165,165 166,166 167,167 168,168 169,169 170,170 171,171 172,172 173,173 174,174 175,175 176,176 177,177 178,178 179,179 180,180 181,181 182,182 183,183 184,184 185,185 186,186 187,187 188,188 189,189 190,190 191,191 192,192 193,193 194,194 195,195 196,196 197,197 198,198 199,199 0,0 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 9,9 10,10 11,11 12,12 13,13 14,14 15,15 16,16 17,17 18,18 19,19 20,20 21,21 22,22 23,23 24,24 25,25 26,26 27,27 28,28 29,29 30,30 31,31 32,32 33,33 34,34 35,35 36,36 37,37 38,38 39,39 40,40 41,41 42,42 43,43 44,44 45,45 46,46 47,47 48,48 49,49 50,50 51,51 52,52 53,53 54,54 55,55 56,56 57,57 58,58 59,59 60,60 61,61 62,62 63,63 64,64 65,65 66,66 67,67 68,68 69,69 70,70 71,71 72,72 73,73 74,74 75,75 76,76 77,77 78,78 79,79 80,80 81,81 82,82 83,83 84,84 85,85 86,86 87,87 88,88 89,89 90,90 91,91 92,92 93,93 94,94 95,95 96,96 97,97 98,98 99,99 100,100 101,101 102,102 103,103 104,104 105,105 106,106 107,107 108,108 109,109 110,110 111,111 112,112 113,113 114,114 115,115 116,116 117,117 118,118 119,119 120,120 121,121 122,122 123,123 124,124 125,125 126,126 127,127 128,128 129,129 130,130 131,131 132,132 133,133 134,134 135,135 136,136 137,137 138,138 139,139 140,140 141,141 142,142 143,143 144,144 145,145 146,146 147,147 148,148 149,149 150,150 151,151 152,152 153,153 154,154 155,155 156,156 157,157 158,158 159,159 160,160 161,161 162,162 163,163 164,164 165,165 166,166 167,167 168,168 169,169 170,170 171,171 172,172 173,173 174,174 175,175 176,176 177,177 
178,178 179,179 180,180 181,181 182,182 183,183 184,184 185,185 186,186 187,187 188,188 189,189 190,190 191,191 192,192 193,193 194,194 195,195 196,196 197,197 198,198 199,199 0,0 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 9,9 10,10 11,11 12,12 13,13 14,14 15,15 16,16 17,17 18,18 19,19 20,20 21,21 22,22 23,23 24,24 25,25 26,26 27,27 28,28 29,29 30,30 31,31 32,32 33,33 34,34 35,35 36,36 37,37 38,38 39,39 40,40 41,41 42,42 43,43 44,44 45,45 46,46 47,47 48,48 49,49 50,50 51,51 52,52 53,53 54,54 55,55 56,56 57,57 58,58 59,59 60,60 61,61 62,62 63,63 64,64 65,65 66,66 67,67 68,68 69,69 70,70 71,71 72,72 73,73 74,74 75,75 76,76 77,77 78,78 79,79 80,80 81,81 82,82 83,83 84,84 85,85 86,86 87,87 88,88 89,89 90,90 91,91 92,92 93,93 94,94 95,95 96,96 97,97 98,98 99,99 100,100 101,101 102,102 103,103 104,104 105,105 106,106 107,107 108,108 109,109 110,110 111,111 112,112 113,113 114,114 115,115 116,116 117,117 118,118 119,119 120,120 121,121 122,122 123,123 124,124 125,125 126,126 127,127 128,128 129,129 130,130 131,131 132,132 133,133 134,134 135,135 136,136 137,137 138,138 139,139 140,140 141,141 142,142 143,143 144,144 145,145 146,146 147,147 148,148 149,149 150,150 151,151 152,152 153,153 154,154 155,155 156,156 157,157 158,158 159,159 160,160 161,161 162,162 163,163 164,164 165,165 166,166 167,167 168,168 169,169 170,170 171,171 172,172 173,173 174,174 175,175 176,176 177,177 178,178 179,179 180,180 181,181 182,182 183,183 184,184 185,185 186,186 187,187 188,188 189,189 190,190 191,191 192,192 193,193 194,194 195,195 196,196 197,197 198,198 199,199 0,0 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 9,9 10,10 11,11 12,12 13,13 14,14 15,15 16,16 17,17 18,18 19,19 20,20 21,21 22,22 23,23 24,24 25,25 26,26 27,27 28,28 29,29 30,30 31,31 32,32 33,33 34,34 35,35 36,36 37,37 38,38 39,39 40,40 41,41 42,42 43,43 44,44 45,45 46,46 47,47 48,48 49,49 50,50 51,51 52,52 53,53 54,54 55,55 56,56 57,57 58,58 59,59 60,60 61,61 62,62 63,63 64,64 65,65 66,66 67,67 68,68 69,69 70,70 71,71 72,72 73,73 74,74 75,75 76,76 77,77 78,78 79,79 80,80 81,81 82,82 83,83 84,84 85,85 86,86 87,87 88,88 89,89 90,90 91,91 92,92 93,93 94,94 95,95 96,96 97,97 98,98 99,99 100,100 101,101 102,102 103,103 104,104 105,105 106,106 107,107 108,108 109,109 110,110 111,111 112,112 113,113 114,114 115,115 116,116 117,117 118,118 119,119 120,120 121,121 122,122 123,123 124,124 125,125 126,126 127,127 128,128 129,129 130,130 131,131 132,132 133,133 134,134 135,135 136,136 137,137 138,138 139,139 140,140 141,141 142,142 143,143 144,144 145,145 146,146 147,147 148,148 149,149 150,150 151,151 152,152 153,153 154,154 155,155 156,156 157,157 158,158 159,159 160,160 161,161 162,162 163,163 164,164 165,165 166,166 167,167 168,168 169,169 170,170 171,171 172,172 173,173 174,174 175,175 176,176 177,177 178,178 179,179 180,180 181,181 182,182 183,183 184,184 185,185 186,186 187,187 188,188 189,189 190,190 191,191 192,192 193,193 194,194 195,195 196,196 197,197 198,198 199,199 0,0 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 9,9 10,10 11,11 12,12 13,13 14,14 15,15 16,16 17,17 18,18 19,19 20,20 21,21 22,22 23,23 24,24 25,25 26,26 27,27 28,28 29,29 30,30 31,31 32,32 33,33 34,34 35,35 36,36 37,37 38,38 39,39 40,40 41,41 42,42 43,43 44,44 45,45 46,46 47,47 48,48 49,49 50,50 51,51 52,52 53,53 54,54 55,55 56,56 57,57 58,58 59,59 60,60 61,61 62,62 63,63 64,64 65,65 66,66 67,67 68,68 69,69 70,70 71,71 72,72 73,73 74,74 75,75 76,76 77,77 78,78 79,79 80,80 81,81 82,82 83,83 84,84 85,85 86,86 87,87 88,88 89,89 90,90 91,91 92,92 93,93 94,94 95,95 96,96 97,97 98,98 99,99 100,100 101,101 102,102 103,103 
104,104 105,105 106,106 107,107 108,108 109,109 110,110 111,111 112,112 113,113 114,114 115,115 116,116 117,117 118,118 119,119 120,120 121,121 122,122 123,123 124,124 125,125 126,126 127,127 128,128 129,129 130,130 131,131 132,132 133,133 134,134 135,135 136,136 137,137 138,138 139,139 140,140 141,141 142,142 143,143 144,144 145,145 146,146 147,147 148,148 149,149 150,150 151,151 152,152 153,153 154,154 155,155 156,156 157,157 158,158 159,159 160,160 161,161 162,162 163,163 164,164 165,165 166,166 167,167 168,168 169,169 170,170 171,171 172,172 173,173 174,174 175,175 176,176 177,177 178,178 179,179 180,180 181,181 182,182 183,183 184,184 185,185 186,186 187,187 188,188 189,189 190,190 191,191 192,192 193,193 194,194 195,195 196,196 197,197 198,198 199,199 \ No newline at end of file diff --git a/examples/opf/experiments/anomaly/temporal/saw_200_category/description.py b/examples/opf/experiments/anomaly/temporal/saw_200_category/description.py deleted file mode 100644 index 30edaee65b..0000000000 --- a/examples/opf/experiments/anomaly/temporal/saw_200_category/description.py +++ /dev/null @@ -1,417 +0,0 @@ -# ---------------------------------------------------------------------- -# Numenta Platform for Intelligent Computing (NuPIC) -# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement -# with Numenta, Inc., for a separate license for this software code, the -# following terms and conditions apply: -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero Public License version 3 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU Affero Public License for more details. -# -# You should have received a copy of the GNU Affero Public License -# along with this program. If not, see http://www.gnu.org/licenses. -# -# http://numenta.org/licenses/ -# ---------------------------------------------------------------------- - -""" -Template file used by the OPF Experiment Generator to generate the actual -description.py file by replacing $XXXXXXXX tokens with desired values. - -This description.py file was generated by: -'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' -""" - -from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI - -from nupic.frameworks.opf.expdescriptionhelpers import ( - updateConfigFromSubConfig, - applyValueGettersToContainer, - DeferredDictLookup) - -from nupic.frameworks.opf.htmpredictionmodelcallbacks import * -from nupic.frameworks.opf.metrics import MetricSpec -from nupic.frameworks.opf.opfutils import (InferenceType, - InferenceElement) -from nupic.support import aggregationDivide - -from nupic.frameworks.opf.opftaskdriver import ( - IterationPhaseSpecLearnOnly, - IterationPhaseSpecInferOnly, - IterationPhaseSpecLearnAndInfer) - - -# Model Configuration Dictionary: -# -# Define the model parameters and adjust for any modifications if imported -# from a sub-experiment. -# -# These fields might be modified by a sub-experiment; this dict is passed -# between the sub-experiment and base experiment -# -# -# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements -# within the config dictionary may be assigned futures derived from the -# ValueGetterBase class, such as DeferredDictLookup. 
-# This facility is particularly handy for enabling substitution of values in -# the config dictionary from other values in the config dictionary, which is -# needed by permutation.py-based experiments. These values will be resolved -# during the call to applyValueGettersToContainer(), -# which we call after the base experiment's config dictionary is updated from -# the sub-experiment. See ValueGetterBase and -# DeferredDictLookup for more details about value-getters. -# -# For each custom encoder parameter to be exposed to the sub-experiment/ -# permutation overrides, define a variable in this section, using key names -# beginning with a single underscore character to avoid collisions with -# pre-defined keys (e.g., _dsEncoderFieldName2_N). -# -# Example: -# config = dict( -# _dsEncoderFieldName2_N = 70, -# _dsEncoderFieldName2_W = 5, -# dsEncoderSchema = [ -# base=dict( -# fieldname='Name2', type='ScalarEncoder', -# name='Name2', minval=0, maxval=270, clipInput=True, -# n=DeferredDictLookup('_dsEncoderFieldName2_N'), -# w=DeferredDictLookup('_dsEncoderFieldName2_W')), -# ], -# ) -# updateConfigFromSubConfig(config) -# applyValueGettersToContainer(config) -config = { - # Type of model that the rest of these parameters apply to. - 'model': "HTMPrediction", - - # Version that specifies the format of the config. - 'version': 1, - - # Intermediate variables used to compute fields in modelParams and also - # referenced from the control section. - 'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'), - ('numericFieldNameB', 'sum'), - ('categoryFieldNameC', 'first')], - 'hours': 0}, - - 'predictAheadTime': None, - - # Model parameter dictionary. - 'modelParams': { - # The type of inference that this model will perform - 'inferenceType': 'TemporalAnomaly', - - 'sensorParams': { - # Sensor diagnostic output verbosity control; - # if > 0: sensor region will print out on screen what it's sensing - # at each step 0: silent; >=1: some info; >=2: more info; - # >=3: even more info (see compute() in py/regions/RecordSensor.py) - 'verbosity' : 0, - - # Example: - # dsEncoderSchema = [ - # DeferredDictLookup('__field_name_encoder'), - # ], - # - # (value generated from DS_ENCODER_SCHEMA) - 'encoders': { - 'f': dict(fieldname='f', n=300, name='f', type='SDRCategoryEncoder', w=21), - }, - - # A dictionary specifying the period for automatically-generated - # resets from a RecordSensor; - # - # None = disable automatically-generated resets (also disabled if - # all of the specified values evaluate to 0). - # Valid keys is the desired combination of the following: - # days, hours, minutes, seconds, milliseconds, microseconds, weeks - # - # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), - # - # (value generated from SENSOR_AUTO_RESET) - 'sensorAutoReset' : None, - }, - - 'spEnable': True, - - 'spParams': { - # SP diagnostic output verbosity control; - # 0: silent; >=1: some info; >=2: more info; - 'spVerbosity' : 0, - - 'globalInhibition': 1, - - # Number of cell columns in the cortical region (same number for - # SP and TP) - # (see also tpNCellsPerCol) - 'columnCount': 2048, - - 'inputWidth': 0, - - # SP inhibition control (absolute value); - # Maximum number of active columns in the SP region's output (when - # there are more, the weaker ones are suppressed) - 'numActiveColumnsPerInhArea': 40, - - 'seed': 1956, - - # potentialPct - # What percent of the columns's receptive field is available - # for potential synapses. 
At initialization time, we will - # choose potentialPct * (2*potentialRadius+1)^2 - 'potentialPct': 0.5, - - # The default connected threshold. Any synapse whose - # permanence value is above the connected threshold is - # a "connected synapse", meaning it can contribute to the - # cell's firing. Typical value is 0.10. Cells whose activity - # level before inhibition falls below minDutyCycleBeforeInh - # will have their own internal synPermConnectedCell - # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' - # is correct here as opposed to 'columns') - 'synPermConnected': 0.1, - - 'synPermActiveInc': 0.1, - - 'synPermInactiveDec': 0.01, - }, - - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of - # reconstructing missing sensor inputs (via SP). - 'tmEnable' : True, - - 'tmParams': { - # TP diagnostic output verbosity control; - # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) - 'verbosity': 0, - - # Number of cell columns in the cortical region (same number for - # SP and TP) - # (see also tpNCellsPerCol) - 'columnCount': 2048, - - # The number of cells (i.e., states), allocated per column. - 'cellsPerColumn': 32, - - 'inputWidth': 2048, - - 'seed': 1960, - - # Temporal Pooler implementation selector (see _getTPClass in - # CLARegion.py). - 'temporalImp': 'cpp', - - # New Synapse formation count - # NOTE: If None, use spNumActivePerInhArea - # - # TODO: need better explanation - 'newSynapseCount': 20, - - # Maximum number of synapses per segment - # > 0 for fixed-size CLA - # -1 for non-fixed-size CLA - # - # TODO: for Ron: once the appropriate value is placed in TP - # constructor, see if we should eliminate this parameter from - # description.py. - 'maxSynapsesPerSegment': 32, - - # Maximum number of segments per cell - # > 0 for fixed-size CLA - # -1 for non-fixed-size CLA - # - # TODO: for Ron: once the appropriate value is placed in TP - # constructor, see if we should eliminate this parameter from - # description.py. - 'maxSegmentsPerCell': 128, - - # Initial Permanence - # TODO: need better explanation - 'initialPerm': 0.21, - - # Permanence Increment - 'permanenceInc': 0.1, - - # Permanence Decrement - # If set to None, will automatically default to tpPermanenceInc - # value. - 'permanenceDec' : 0.1, - - 'globalDecay': 0.0, - - 'maxAge': 0, - - # Minimum number of active synapses for a segment to be considered - # during search for the best-matching segments. - # None=use default - # Replaces: tpMinThreshold - 'minThreshold': 12, - - # Segment activation threshold. - # A segment is active if it has >= tpSegmentActivationThreshold - # connected synapses that are active due to infActiveState - # None=use default - # Replaces: tpActivationThreshold - 'activationThreshold': 16, - - 'outputType': 'normal', - - # "Pay Attention Mode" length. This tells the TP how many new - # elements to append to the end of a learned sequence at a time. - # Smaller values are better for datasets with short sequences, - # higher values are better for datasets with long sequences. - 'pamLength': 1, - }, - - 'clParams': { - # Classifier implementation selection. 
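(Hedged aside: in NuPIC of this vintage the OPF hands this choice to
SDRClassifierFactory, which picks between the Python and C++ classifier builds;
the factory call below is a sketch under that assumption.)

    from nupic.algorithms.sdr_classifier_factory import SDRClassifierFactory

    # 'py' selects the pure-Python classifier, 'cpp' the compiled one.
    clf = SDRClassifierFactory.create(steps=[1], alpha=0.001,
                                      implementation='py', verbosity=0)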
- 'implementation': 'py', - - 'regionName' : 'SDRClassifierRegion', - - # Classifier diagnostic output verbosity control; - # 0: silent; [1..6]: increasing levels of verbosity - 'verbosity' : 0, - - # This controls how fast the classifier learns/forgets. Higher values - # make it adapt faster and forget older patterns faster. - 'alpha': 0.001, - - # This is set after the call to updateConfigFromSubConfig and is - # computed from the aggregationInfo and predictAheadTime. - 'steps': '1', - - - }, - - 'trainSPNetOnlyIfRequested': False, - }, - - -} -# end of config dictionary - - -# Adjust base config dictionary for any modifications if imported from a -# sub-experiment -updateConfigFromSubConfig(config) - - -# Compute predictionSteps based on the predictAheadTime and the aggregation -# period, which may be permuted over. -if config['predictAheadTime'] is not None: - predictionSteps = int(round(aggregationDivide( - config['predictAheadTime'], config['aggregationInfo']))) - assert (predictionSteps >= 1) - config['modelParams']['clParams']['steps'] = str(predictionSteps) - - -# Adjust config by applying ValueGetterBase-derived -# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order -# to support value-getter-based substitutions from the sub-experiment (if any) -applyValueGettersToContainer(config) - - - -# [optional] A sequence of one or more tasks that describe what to do with the -# model. Each task consists of a task label, an input spec., iteration count, -# and a task-control spec per opfTaskSchema.json -# -# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver. -# Clients that interact with OPFExperiment directly do not make use of -# the tasks specification. -# -control = dict( - environment='opfExperiment', - -tasks = [ - { - # Task label; this label string may be used for diagnostic logging and for - # constructing filenames or directory pathnames for task-specific files, etc. - 'taskLabel' : "Anomaly", - - # Input stream specification per py/nupic/cluster/database/StreamDef.json. - # - 'dataset' : { - 'info': 'test_NoProviders', - 'version': 1, - - 'streams': [ - { - 'columns': ['*'], - 'info': 'my simple dataset', - 'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'), - } - ], - - # TODO: Aggregation is not supported yet by run_opf_experiment.py - #'aggregation' : config['aggregationInfo'] - }, - - # Iteration count: maximum number of iterations. Each iteration corresponds - # to one record from the (possibly aggregated) dataset. The task is - # terminated when either number of iterations reaches iterationCount or - # all records in the (possibly aggregated) database have been processed, - # whichever occurs first. - # - # iterationCount of -1 = iterate over the entire dataset - 'iterationCount' : -1, - - - # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json) - 'taskControl' : { - - # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX - # instances. - 'iterationCycle' : [ - #IterationPhaseSpecLearnOnly(1000), - IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None), - #IterationPhaseSpecInferOnly(10, inferenceArgs=None), - ], - - 'metrics' : [ - ], - - # Logged Metrics: A sequence of regular expressions that specify which of - # the metrics from the Inference Specifications section MUST be logged for - # every prediction. The regex's correspond to the automatically generated - # metric labels. 
This is similar to the way the optimization metric is - # specified in permutations.py. - 'loggedMetrics': ['.*nupicScore.*'], - - - # Callbacks for experimentation/research (optional) - 'callbacks' : { - # Callbacks to be called at the beginning of a task, before model iterations. - # Signature: callback(); returns nothing -# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb], -# 'setup' : [htmPredictionModelControlDisableTPLearningCb], - 'setup' : [], - - # Callbacks to be called after every learning/inference iteration - # Signature: callback(); returns nothing - 'postIter' : [], - - # Callbacks to be called when the experiment task is finished - # Signature: callback(); returns nothing - 'finish' : [] - } - } # End of taskControl - }, # End of task -] - -) - - - -descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, - control=control) diff --git a/examples/opf/experiments/classification/category_SP_0/UNDER_DEVELOPMENT b/examples/opf/experiments/classification/category_SP_0/UNDER_DEVELOPMENT deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/opf/experiments/classification/category_SP_0/description.py b/examples/opf/experiments/classification/category_SP_0/description.py deleted file mode 100644 index 6719b05bb4..0000000000 --- a/examples/opf/experiments/classification/category_SP_0/description.py +++ /dev/null @@ -1,40 +0,0 @@ -# ---------------------------------------------------------------------- -# Numenta Platform for Intelligent Computing (NuPIC) -# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement -# with Numenta, Inc., for a separate license for this software code, the -# following terms and conditions apply: -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero Public License version 3 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU Affero Public License for more details. -# -# You should have received a copy of the GNU Affero Public License -# along with this program. If not, see http://www.gnu.org/licenses. -# -# http://numenta.org/licenses/ -# ---------------------------------------------------------------------- - -## This file defines parameters for a prediction experiment. 
- -import os -from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription - -# the sub-experiment configuration -config = \ -{ - 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/category_SP_0.csv'), - 'modelParams': { 'clParams': { 'verbosity': 1}, - 'inferenceType': 'NontemporalClassification', - 'sensorParams': { 'encoders': { }, 'verbosity': 1}, - 'spParams': { 'spVerbosity': 1 }, - 'tmEnable': False, - 'tmParams': { }}} - -mod = importBaseDescription('../base_category/description.py', config) -locals().update(mod.__dict__) diff --git a/examples/opf/experiments/classification/category_SP_1/UNDER_DEVELOPMENT b/examples/opf/experiments/classification/category_SP_1/UNDER_DEVELOPMENT deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/opf/experiments/classification/category_SP_1/description.py b/examples/opf/experiments/classification/category_SP_1/description.py deleted file mode 100644 index 0ef0ec94ad..0000000000 --- a/examples/opf/experiments/classification/category_SP_1/description.py +++ /dev/null @@ -1,40 +0,0 @@ -# ---------------------------------------------------------------------- -# Numenta Platform for Intelligent Computing (NuPIC) -# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement -# with Numenta, Inc., for a separate license for this software code, the -# following terms and conditions apply: -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero Public License version 3 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the GNU Affero Public License for more details. -# -# You should have received a copy of the GNU Affero Public License -# along with this program. If not, see http://www.gnu.org/licenses. -# -# http://numenta.org/licenses/ -# ---------------------------------------------------------------------- - -## This file defines parameters for a prediction experiment. - -import os -from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription - -# the sub-experiment configuration -config = \ -{ - 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/category_SP_1.csv'), - 'modelParams': { 'clParams': { 'verbosity': 0}, - 'inferenceType': 'NontemporalClassification', - 'sensorParams': { 'encoders': { }, 'verbosity': 0}, - 'spParams': { }, - 'tmEnable': False, - 'tmParams': { }}} - -mod = importBaseDescription('../base_category/description.py', config) -locals().update(mod.__dict__)
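(Closing hedged note on how the two sub-experiment files above were consumed:
importBaseDescription re-executes the named base description.py with the
override dict made visible to it, and the base layers those overrides onto its
own defaults, via updateConfigFromSubConfig, before building its
ExperimentDescriptionAPI. The sketch below restates that flow; the attribute
access on mod is an assumption, since the originals simply dump mod.__dict__
into locals().)

    import os
    from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription

    # Illustrative override only; any subset of the base config may appear here.
    config = {'modelParams': {'clParams': {'verbosity': 1}}}

    # Re-runs the base description with config applied over its defaults.
    mod = importBaseDescription('../base_category/description.py', config)
    descriptionInterface = mod.descriptionInterface

An experiment directory like these would then have been run with the stock
runner, e.g. "python scripts/run_opf_experiment.py
examples/opf/experiments/classification/category_SP_1/" (assuming the usual
NuPIC checkout layout).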