diff --git a/CHANGELOG.md b/CHANGELOG.md index e1c8cda9aa..dfe3a0e36f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,7 +125,7 @@ * Updated SDR classifier internals * calculate raw anomaly score in KNNAnomalyClassifier * removes anomaly.py dependency in network_api_demo.py -* changes how TPRegion computes prevPredictdColumns and updates clamodel +* changes how TMRegion computes prevPredictedColumns and updates clamodel * Install pip from local copy, other simplifications * Fixup PYTHONPATH to properly include previously-defined PYTHONPATH * adds pseudocode to core functions @@ -250,8 +250,8 @@ * Change temporalImp to tm_py for both networks and add comment about it being a temporary value until C++ TM is implemented * Refactored to remove common code between network_checkpoint_test.py and temporal_memory_compatibility_test.py * Use named constants from nupic.data.fieldmeta in aggregator module instead of naked constants. -* Fix AttributeError: 'TPShim' object has no attribute 'topDownCompute' -* Support more parameters in TPShim +* Fix AttributeError: 'TMShim' object has no attribute 'topDownCompute' +* Support more parameters in TMShim * Serialize remaining fields in CLAModel using capnproto * Enforce pyproj==1.9.3 in requirements.txt * Use FastCLAClassifier read class method instead of instance method @@ -394,12 +394,12 @@ * Merge remote-tracking branch 'upstream/master' * Rename testconsoleprinter_output.txt so as to not be picked up by py.test as a test during discovery * likelihood test: fix raw-value must be int -* Fix broken TPShim -* Revert "Fix TP Shim" +* Fix broken TMShim +* Revert "Fix TM Shim" * Anomaly serialization verify complex anomaly instance * Likelihood pickle serialization test * MovingAverage pickle serialization test -* Fix TP Shim +* Fix TM Shim * Removed stripUnlearnedColumns-from-SPRegion * Updated comment describing activeArray parameter of stripUnlearnedColumns method in SP * Revert "MovingAvera: remove unused pickle serialization method" @@ -482,7 +482,7 @@ * Remove FDRCSpatial2.py * Replace the use of FDRCSpatial2 to SpatialPooler * SP profile implemented from tp_large -* TP profile: can use args from command-line, random data used +* TM profile: can use args from command-line, random data used * Adds AnomalyRegion for computing the raw anomaly score. Updates the network api example to use the new anomaly region. Updates PyRegion to have better error messages.
* Remove FlatSpatialPooler * Add delete segment/synapse functionality to Connections data structure diff --git a/ci/travis/script-run-examples.sh b/ci/travis/script-run-examples.sh index c51b704a15..20a39bb855 100755 --- a/ci/travis/script-run-examples.sh +++ b/ci/travis/script-run-examples.sh @@ -31,7 +31,7 @@ python ${NUPIC}/examples/bindings/sparse_matrix_how_to.py || exit # examples/opf (run at least 1 from each category) python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/anomaly/spatial/2field_few_skewed/ || exit python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/anomaly/temporal/saw_200/ || exit -python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/classification/category_TP_1/ || exit +python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/classification/category_TM_1/ || exit python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/missing_record/simple_0/ || exit python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/multistep/hotgym/ || exit python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/ || exit @@ -39,6 +39,6 @@ python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/ # opf/experiments/params - skip now python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/spatial_classification/category_1/ || exit -# examples/tp -python ${NUPIC}/examples/tp/hello_tm.py || exit -python ${NUPIC}/examples/tp/tp_test.py || exit +# examples/tm +python ${NUPIC}/examples/tm/hello_tm.py || exit +python ${NUPIC}/examples/tm/tm_test.py || exit diff --git a/docs/README.md b/docs/README.md index 75166c99b7..771ca93489 100644 --- a/docs/README.md +++ b/docs/README.md @@ -135,12 +135,12 @@ nupic │   ├── SPRegion.py [TODO] │   ├── SVMClassifierNode.py [TODO] │   ├── Spec.py [TODO] -│   ├── TPRegion.py [TODO] +│   ├── TMRegion.py [TODO] │   ├── TestRegion.py [TODO] │   └─── UnimportableNode.py [TODO] ├── research -│   ├── TP.py [TODO] -│   ├── TP10X2.py [TODO] +│   ├── BacktrackingTM.py [TODO] +│   ├── BacktrackingTMCPP.py [TODO] │   ├── TP_shim.py [TODO] │   ├── connections.py [TODO] │   ├── fdrutilities.py [TODO] diff --git a/docs/examples/network/complete-example.py b/docs/examples/network/complete-example.py index ef761fe279..509837f917 100644 --- a/docs/examples/network/complete-example.py +++ b/docs/examples/network/complete-example.py @@ -70,7 +70,7 @@ def createNetwork(dataSource): # Add SP and TM regions. network.addRegion("SP", "py.SPRegion", json.dumps(modelParams["spParams"])) - network.addRegion("TM", "py.TPRegion", json.dumps(modelParams["tmParams"])) + network.addRegion("TM", "py.TMRegion", json.dumps(modelParams["tmParams"])) # Add a classifier region. clName = "py.%s" % modelParams["clParams"].pop("regionName") diff --git a/docs/source/api/network/regions.rst b/docs/source/api/network/regions.rst index d979693c04..64876e3928 100644 --- a/docs/source/api/network/regions.rst +++ b/docs/source/api/network/regions.rst @@ -23,10 +23,10 @@ SPRegion :members: :show-inheritance: -TPRegion +TMRegion ^^^^^^^^^^^^^ -.. autoclass:: nupic.regions.TPRegion.TPRegion +.. 
autoclass:: nupic.regions.TMRegion.TMRegion :members: :show-inheritance: diff --git a/docs/source/guides/anomaly-detection.md b/docs/source/guides/anomaly-detection.md index 927a0a13c4..4e390f903f 100644 --- a/docs/source/guides/anomaly-detection.md +++ b/docs/source/guides/anomaly-detection.md @@ -4,17 +4,17 @@ This technical note describes how the anomaly score is implemented and incorpora The anomaly score enables the CLA to provide a metric representing the degree to which each record is predictable. For example, if you have a temporal anomaly model that is predicting the energy consumption of a building, each record will have an anomaly score between zero and one. A zero represents a completely predicted value whereas a one represents a completely anomalous value. -The anomaly score feature of CLA is implemented on top of the core spatial and temporal pooler, and don’t require any spatial pooler and temporal pooler algorithm changes. +The anomaly score feature of CLA is implemented on top of the core spatial pooler and temporal memory, and does not require any spatial pooler or temporal memory algorithm changes. ## TemporalAnomaly model ### Description -The user must specify the model as a TemporalAnomaly type to have the model report the anomaly score. The anomaly score uses the temporal pooler to detect novel points in sequences. This will detect both novel input patterns (because they have not been seen in any sequence) as well as old spatial patterns that occur in a novel context. +The user must specify the model as a TemporalAnomaly type to have the model report the anomaly score. The anomaly score uses the temporal memory to detect novel points in sequences. This will detect both novel input patterns (because they have not been seen in any sequence) as well as old spatial patterns that occur in a novel context. ### Computation -A TemporalAnomaly model calculates the anomaly score based on the correctness of the previous prediction. This is calculated as the percentage of active spatial pooler columns that were incorrectly predicted by the temporal pooler. +A TemporalAnomaly model calculates the anomaly score based on the correctness of the previous prediction. This is calculated as the percentage of active spatial pooler columns that were incorrectly predicted by the temporal memory. The algorithm for the anomaly score is as follows: @@ -59,7 +59,7 @@ There were also some attempts at adding anomaly detection that are "non-temporal ### Computation -Since NontemporalAnomaly models have no temporal pooler, the anomaly score is based on the state within the spatial pooler. +Since NontemporalAnomaly models have no temporal memory, the anomaly score is based on the state within the spatial pooler. To compute the nontemporal anomaly score, we first compute the "match" score for each winning column after inhibition @@ -77,4 +77,4 @@ The purpose of this anomaly score was to detect input records that represented n ### Results -This algorithm was run on some artificial datasets. However, the results were not very promising, and this approach was abandoned. From a theoretical perspective the temporal anomaly detection technique is a superset of this technique. If a static pattern by itself is novel, by definition the temporal pooler won't make good predictions and hence the temporal anomaly score should be high. As such there was not too much interest in pursuing this route. +This algorithm was run on some artificial datasets.
However, the results were not very promising, and this approach was abandoned. From a theoretical perspective the temporal anomaly detection technique is a superset of this technique. If a static pattern by itself is novel, by definition the temporal memory won't make good predictions and hence the temporal anomaly score should be high. As such there was not too much interest in pursuing this route. diff --git a/docs/source/guides/swarming/index.rst b/docs/source/guides/swarming/index.rst index 1919479d5d..97b8507c28 100644 --- a/docs/source/guides/swarming/index.rst +++ b/docs/source/guides/swarming/index.rst @@ -4,7 +4,7 @@ Swarming Swarming is a process that automatically determines the best model for a given dataset. By "best", we mean the model that most accurately produces the desired output. Swarming figures out which optional components should go - into a model (encoders, spatial pooler, temporal pooler, classifier, etc.), + into a model (encoders, spatial pooler, temporal memory, classifier, etc.), as well as the best parameter values to use for each component. We have plans to replace the current swarming library with a more universal diff --git a/docs/source/guides/swarming/running.md b/docs/source/guides/swarming/running.md index 33130ded19..e4daa8ce82 100644 --- a/docs/source/guides/swarming/running.md +++ b/docs/source/guides/swarming/running.md @@ -2,7 +2,7 @@ This document contains detailed instructions for configuring and running swarms. Please see the document [Swarming Algorithm](Swarming-Algorithm) for a description of the underlying swarming algorithm. -Swarming is a process that automatically determines the best model for a given dataset. By "best", we mean the model that most accurately produces the desired output. Swarming figures out which optional components should go into a model (encoders, spatial pooler, temporal pooler, classifier, etc.), as well as the best parameter values to use for each component. +Swarming is a process that automatically determines the best model for a given dataset. By "best", we mean the model that most accurately produces the desired output. Swarming figures out which optional components should go into a model (encoders, spatial pooler, temporal memory, classifier, etc.), as well as the best parameter values to use for each component. When you run a swarm, you provide the following information: * A dataset to optimize over (a .csv file containing the inputs and desired output). diff --git a/examples/.ipynb_checkpoints/NuPIC Walkthrough-checkpoint.ipynb b/examples/.ipynb_checkpoints/NuPIC Walkthrough-checkpoint.ipynb new file mode 100644 index 0000000000..dde7e48b59 --- /dev/null +++ b/examples/.ipynb_checkpoints/NuPIC Walkthrough-checkpoint.ipynb @@ -0,0 +1,2051 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Encoders\n", + "\n", + "* Scalar\n", + "* Date/time\n", + "* Category\n", + "* Multi" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "import numpy" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.encoders import ScalarEncoder\n", + "\n", + "ScalarEncoder?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n", + "4 = [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n", + "5 = [0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "# 22 bits with 3 active representing values 0 to 100\n", + "# clipInput=True makes values >100 encode the same as 100 (instead of throwing a ValueError)\n", + "# forced=True allows small values for `n` and `w`\n", + "enc = ScalarEncoder(n=22, w=3, minval=2.5, maxval=97.5, clipInput=True, forced=True)\n", + "print \"3 =\", enc.encode(3)\n", + "print \"4 =\", enc.encode(4)\n", + "print \"5 =\", enc.encode(5)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n", + "1000 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n" + ] + } + ], + "source": [ + "# Encode maxval\n", + "print \"100 =\", enc.encode(100)\n", + "# See that any larger number gets the same encoding\n", + "print \"1000 =\", enc.encode(1000)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder\n", + "\n", + "RandomDistributedScalarEncoder?" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n", + "4 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1]\n", + "5 = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1]\n", + "\n", + "100 = [0 1 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n", + "1000 = [0 0 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "# 21 bits with 3 active with buckets of size 5\n", + "rdse = RandomDistributedScalarEncoder(n=21, w=3, resolution=5, offset=2.5)\n", + "\n", + "print \"3 = \", rdse.encode(3)\n", + "print \"4 = \", rdse.encode(4)\n", + "print \"5 = \", rdse.encode(5)\n", + "print\n", + "print \"100 = \", rdse.encode(100)\n", + "print \"1000 =\", rdse.encode(1000)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "import datetime\n", + "from nupic.encoders.date import DateEncoder\n", + "\n", + "DateEncoder?" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "now = [0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0]\n", + "next month = [0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0]\n", + "xmas = [1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n" + ] + } + ], + "source": [ + "de = DateEncoder(season=5)\n", + "\n", + "now = datetime.datetime.strptime(\"2014-05-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n", + "print \"now = \", de.encode(now)\n", + "nextMonth = datetime.datetime.strptime(\"2014-06-02 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n", + "print \"next month =\", de.encode(nextMonth)\n", + "xmas = datetime.datetime.strptime(\"2014-12-25 13:08:58\", \"%Y-%m-%d %H:%M:%S\")\n", + "print \"xmas = \", de.encode(xmas)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "cat = [0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n", + "dog = [0 0 0 0 0 0 1 1 1 0 0 0 0 0 0]\n", + "monkey = [0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n", + "slow loris = [0 0 0 0 0 0 0 0 0 0 0 0 1 1 1]\n" + ] + } + ], + "source": [ + "from nupic.encoders.category import CategoryEncoder\n", + "\n", + "categories = (\"cat\", \"dog\", \"monkey\", \"slow loris\")\n", + "encoder = CategoryEncoder(w=3, categoryList=categories, forced=True)\n", + "cat = encoder.encode(\"cat\")\n", + "dog = encoder.encode(\"dog\")\n", + "monkey = encoder.encode(\"monkey\")\n", + "loris = encoder.encode(\"slow loris\")\n", + "print \"cat = \", cat\n", + "print \"dog = \", dog\n", + "print \"monkey = \", monkey\n", + "print \"slow loris =\", loris" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "print encoder.encode(None)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "print encoder.encode(\"unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "({'category': ([(1, 1)], 'cat')}, ['category'])\n" + ] + } + ], + "source": [ + "print encoder.decode(cat)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "({'category': ([(1, 2)], 'cat, dog')}, ['category'])\n" + ] + } + ], + "source": [ + "catdog = numpy.array([0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0])\n", + "print encoder.decode(catdog)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Spatial Pooler" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.research.spatial_pooler import 
SpatialPooler\n", + "\n", + "print SpatialPooler?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "15\n", + "[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "print len(cat)\n", + "print cat" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n", + "[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n" + ] + } + ], + "source": [ + "sp = SpatialPooler(inputDimensions=(15,),\n", + " columnDimensions=(4,),\n", + " potentialRadius=15,\n", + " numActiveColumnsPerInhArea=1,\n", + " globalInhibition=True,\n", + " synPermActiveInc=0.03,\n", + " potentialPct=1.0)\n", + "import numpy\n", + "for column in xrange(4):\n", + " connected = numpy.zeros((15,), dtype=\"int\")\n", + " 
sp.getConnectedSynapses(column, connected)\n", + " print connected" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1 0 0 0]\n" + ] + } + ], + "source": [ + "output = numpy.zeros((4,), dtype=\"int\")\n", + "sp.compute(cat, learn=True, activeArray=output)\n", + "print output" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "for _ in xrange(20):\n", + " sp.compute(cat, learn=True, activeArray=output)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n", + "[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n" + ] + } + ], + "source": [ + "for column in xrange(4):\n", + " connected = numpy.zeros((15,), dtype=\"int\")\n", + " sp.getConnectedSynapses(column, connected)\n", + " print connected" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "for _ in xrange(200):\n", + " sp.compute(cat, learn=True, activeArray=output)\n", + " sp.compute(dog, learn=True, activeArray=output)\n", + " sp.compute(monkey, learn=True, activeArray=output)\n", + " sp.compute(loris, learn=True, activeArray=output)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n", + "[0 0 0 0 0 0 1 1 1 0 0 0 1 1 1]\n" + ] + } + ], + "source": [ + "for column in xrange(4):\n", + " connected = numpy.zeros((15,), dtype=\"int\")\n", + " sp.getConnectedSynapses(column, connected)\n", + " print connected" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 0 0 1 1 0 1 0 0 0 0 0 0 0 0]\n" + ] + } + ], + "source": [ + "noisyCat = numpy.zeros((15,), dtype=\"uint32\")\n", + "noisyCat[3] = 1\n", + "noisyCat[4] = 1\n", + "# This is part of dog!\n", + "noisyCat[6] = 1\n", + "print noisyCat" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0 1 0 0]\n" + ] + } + ], + "source": [ + "sp.compute(noisyCat, learn=False, activeArray=output)\n", + "print output # matches cat!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Temporal Memory (a.k.a. 
Sequence Memory, Temporal Pooler)\n", + "\n", + "From: `examples/tm/hello_tm.py`" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.research.BacktrackingTM import BacktrackingTM\n", + "\n", + "BacktrackingTM?" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "# Step 1: create Temporal Memory (TM) instance with appropriate parameters\n", + "tm = BacktrackingTM(numberOfCols=50, cellsPerColumn=2,\n", + " initialPerm=0.5, connectedPerm=0.5,\n", + " minThreshold=10, newSynapseCount=10,\n", + " permanenceInc=0.1, permanenceDec=0.0,\n", + " activationThreshold=8,\n", + " globalDecay=0, burnIn=1,\n", + " checkSynapseConsistency=False,\n", + " pamLength=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "# Step 2: create input vectors to feed to the temporal memory. Each input vector\n", + "# must be numberOfCols wide. Here we create a simple sequence of 5 vectors\n", + "# representing the sequence A -> B -> C -> D -> E\n", + "x = numpy.zeros((5, tm.numberOfCols), dtype=\"uint32\")\n", + "x[0,0:10] = 1 # Input SDR representing \"A\", corresponding to columns 0-9\n", + "x[1,10:20] = 1 # Input SDR representing \"B\", corresponding to columns 10-19\n", + "x[2,20:30] = 1 # Input SDR representing \"C\", corresponding to columns 20-29\n", + "x[3,30:40] = 1 # Input SDR representing \"D\", corresponding to columns 30-39\n", + "x[4,40:50] = 1 # Input SDR representing \"E\", corresponding to columns 40-49" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "# Step 3: send this simple sequence to the temporal memory for learning\n", + "# We repeat the sequence 10 times\n", + "for i in range(10):\n", + "\n", + " # Send each letter in the sequence in order\n", + " for j in range(5):\n", + "\n", + " # The compute method performs one step of learning and/or inference. Note:\n", + " # here we just perform learning but you can perform prediction/inference and\n", + " # learning in the same step if you want (online learning).\n", + " tm.compute(x[j], enableLearn=True, computeInfOutput=False)\n", + "\n", + " # This function prints the segments associated with every cell.\n", + " # If you really want to understand the TM, uncomment this line. By following\n", + " # every step you can get an excellent understanding of exactly how the TM\n", + " # learns.\n", + " #tm.printCells()\n", + "\n", + " # The reset command tells the TM that a sequence just ended and essentially\n", + " # zeros out all the states. 
It is not strictly necessary but it's a bit\n", + " # messier without resets, and the TM learns quicker with resets.\n", + " tm.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "-------- A -----------\n", + "Raw input vector\n", + "1111111111 0000000000 0000000000 0000000000 0000000000 \n", + "\n", + "All the active and predicted cells:\n", + "\n", + "Inference Active state\n", + "1111111111 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "Inference Predicted state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 1111111111 0000000000 0000000000 0000000000 \n", + "\n", + "\n", + "The following columns are predicted by the temporal memory. This\n", + "should correspond to columns in the *next* item in the sequence.\n", + "[10 11 12 13 14 15 16 17 18 19] \n", + "\n", + "\n", + "-------- B -----------\n", + "Raw input vector\n", + "0000000000 1111111111 0000000000 0000000000 0000000000 \n", + "\n", + "All the active and predicted cells:\n", + "\n", + "Inference Active state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 1111111111 0000000000 0000000000 0000000000 \n", + "Inference Predicted state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 1111111111 0000000000 0000000000 \n", + "\n", + "\n", + "The following columns are predicted by the temporal memory. This\n", + "should correspond to columns in the *next* item in the sequence.\n", + "[20 21 22 23 24 25 26 27 28 29] \n", + "\n", + "\n", + "-------- C -----------\n", + "Raw input vector\n", + "0000000000 0000000000 1111111111 0000000000 0000000000 \n", + "\n", + "All the active and predicted cells:\n", + "\n", + "Inference Active state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 1111111111 0000000000 0000000000 \n", + "Inference Predicted state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 1111111111 0000000000 \n", + "\n", + "\n", + "The following columns are predicted by the temporal memory. This\n", + "should correspond to columns in the *next* item in the sequence.\n", + "[30 31 32 33 34 35 36 37 38 39] \n", + "\n", + "\n", + "-------- D -----------\n", + "Raw input vector\n", + "0000000000 0000000000 0000000000 1111111111 0000000000 \n", + "\n", + "All the active and predicted cells:\n", + "\n", + "Inference Active state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 1111111111 0000000000 \n", + "Inference Predicted state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 0000000000 1111111111 \n", + "\n", + "\n", + "The following columns are predicted by the temporal memory. 
This\n", + "should correspond to columns in the *next* item in the sequence.\n", + "[40 41 42 43 44 45 46 47 48 49] \n", + "\n", + "\n", + "-------- E -----------\n", + "Raw input vector\n", + "0000000000 0000000000 0000000000 0000000000 1111111111 \n", + "\n", + "All the active and predicted cells:\n", + "\n", + "Inference Active state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 0000000000 1111111111 \n", + "Inference Predicted state\n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "0000000000 0000000000 0000000000 0000000000 0000000000 \n", + "\n", + "\n", + "The following columns are predicted by the temporal memory. This\n", + "should correspond to columns in the *next* item in the sequence.\n", + "[] \n" + ] + } + ], + "source": [ + "# Step 4: send the same sequence of vectors and look at predictions made by\n", + "# temporal memory\n", + "\n", + "# Utility routine for printing the input vector\n", + "def formatRow(x):\n", + " s = ''\n", + " for c in range(len(x)):\n", + " if c > 0 and c % 10 == 0:\n", + " s += ' '\n", + " s += str(x[c])\n", + " s += ' '\n", + " return s\n", + "\n", + "for j in range(5):\n", + " print \"\\n\\n--------\",\"ABCDE\"[j],\"-----------\"\n", + " print \"Raw input vector\\n\",formatRow(x[j])\n", + "\n", + " # Send each vector to the TP, with learning turned off\n", + " tm.compute(x[j], enableLearn=False, computeInfOutput=True)\n", + "\n", + " # This method prints out the active state of each cell followed by the\n", + " # predicted state of each cell. For convenience the cells are grouped\n", + " # 10 at a time. When there are multiple cells per column the printout\n", + " # is arranged so the cells in a column are stacked together\n", + " #\n", + " # What you should notice is that the columns where active state is 1\n", + " # represent the SDR for the current input pattern and the columns where\n", + " # predicted state is 1 represent the SDR for the next expected pattern\n", + " print \"\\nAll the active and predicted cells:\"\n", + " tm.printStates(printPrevious=False, printLearnState=False)\n", + "\n", + " # tm.getPredictedState() gets the predicted cells.\n", + " # predictedCells[c][i] represents the state of the i'th cell in the c'th\n", + " # column. To see if a column is predicted, we can simply take the OR\n", + " # across all the cells in that column. In numpy we can do this by taking\n", + " # the max along axis 1.\n", + " print \"\\n\\nThe following columns are predicted by the temporal memory. This\"\n", + " print \"should correspond to columns in the *next* item in the sequence.\"\n", + " predictedCells = tm.getPredictedState()\n", + " print formatRow(predictedCells.max(axis=1).nonzero())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Networks and Regions\n", + "\n", + "See slides." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Online Prediction Framework\n", + "\n", + "* CLAModel\n", + "* OPF Client\n", + "* Swarming\n", + "\n", + "# CLAModel\n", + "\n", + "From `examples/opf/clients/hotgym/simple/hotgym.py`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Model Parameters\n", + "\n", + "`MODEL_PARAMS` have all of the parameters for the CLA model and subcomponents" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "# Model Params!\n", + "MODEL_PARAMS = {\n", + " # Type of model that the rest of these parameters apply to.\n", + " 'model': \"HTMPrediction\",\n", + "\n", + " # Version that specifies the format of the config.\n", + " 'version': 1,\n", + "\n", + " # Intermediate variables used to compute fields in modelParams and also\n", + " # referenced from the control section.\n", + " 'aggregationInfo': { 'days': 0,\n", + " 'fields': [('consumption', 'sum')],\n", + " 'hours': 1,\n", + " 'microseconds': 0,\n", + " 'milliseconds': 0,\n", + " 'minutes': 0,\n", + " 'months': 0,\n", + " 'seconds': 0,\n", + " 'weeks': 0,\n", + " 'years': 0},\n", + "\n", + " 'predictAheadTime': None,\n", + "\n", + " # Model parameter dictionary.\n", + " 'modelParams': {\n", + " # The type of inference that this model will perform\n", + " 'inferenceType': 'TemporalMultiStep',\n", + "\n", + " 'sensorParams': {\n", + " # Sensor diagnostic output verbosity control;\n", + " # if > 0: sensor region will print out on screen what it's sensing\n", + " # at each step 0: silent; >=1: some info; >=2: more info;\n", + " # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n", + " 'verbosity' : 0,\n", + "\n", + " # Include the encoders we use\n", + " 'encoders': {\n", + " u'timestamp_timeOfDay': {\n", + " 'fieldname': u'timestamp',\n", + " 'name': u'timestamp_timeOfDay',\n", + " 'timeOfDay': (21, 0.5),\n", + " 'type': 'DateEncoder'\n", + " },\n", + " u'timestamp_dayOfWeek': None,\n", + " u'timestamp_weekend': None,\n", + " u'consumption': {\n", + " 'clipInput': True,\n", + " 'fieldname': u'consumption',\n", + " 'maxval': 100.0,\n", + " 'minval': 0.0,\n", + " 'n': 50,\n", + " 'name': u'c1',\n", + " 'type': 'ScalarEncoder',\n", + " 'w': 21\n", + " },\n", + " },\n", + "\n", + " # A dictionary specifying the period for automatically-generated\n", + " # resets from a RecordSensor;\n", + " #\n", + " # None = disable automatically-generated resets (also disabled if\n", + " # all of the specified values evaluate to 0).\n", + " # Valid keys is the desired combination of the following:\n", + " # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n", + " #\n", + " # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n", + " #\n", + " # (value generated from SENSOR_AUTO_RESET)\n", + " 'sensorAutoReset' : None,\n", + " },\n", + "\n", + " 'spEnable': True,\n", + "\n", + " 'spParams': {\n", + " # SP diagnostic output verbosity control;\n", + " # 0: silent; >=1: some info; >=2: more info;\n", + " 'spVerbosity' : 0,\n", + "\n", + " # Spatial Pooler implementation selector, see getSPClass\n", + " # in py/regions/SPRegion.py for details\n", + " # 'py' (default), 'cpp' (speed optimized, new)\n", + " 'spatialImp' : 'cpp',\n", + "\n", + " 'globalInhibition': 1,\n", + "\n", + " # Number of cell columns in the cortical 
region (same number for\n", + " # SP and TM)\n", + " # (see also tpNCellsPerCol)\n", + " 'columnCount': 2048,\n", + "\n", + " 'inputWidth': 0,\n", + "\n", + " # SP inhibition control (absolute value);\n", + " # Maximum number of active columns in the SP region's output (when\n", + " # there are more, the weaker ones are suppressed)\n", + " 'numActiveColumnsPerInhArea': 40,\n", + "\n", + " 'seed': 1956,\n", + "\n", + " # potentialPct\n", + " # What percent of the columns's receptive field is available\n", + " # for potential synapses. At initialization time, we will\n", + " # choose potentialPct * (2*potentialRadius+1)^2\n", + " 'potentialPct': 0.5,\n", + "\n", + " # The default connected threshold. Any synapse whose\n", + " # permanence value is above the connected threshold is\n", + " # a \"connected synapse\", meaning it can contribute to the\n", + " # cell's firing. Typical value is 0.10. Cells whose activity\n", + " # level before inhibition falls below minDutyCycleBeforeInh\n", + " # will have their own internal synPermConnectedCell\n", + " # threshold set below this default value.\n", + " # (This concept applies to both SP and TM and so 'cells'\n", + " # is correct here as opposed to 'columns')\n", + " 'synPermConnected': 0.1,\n", + "\n", + " 'synPermActiveInc': 0.1,\n", + "\n", + " 'synPermInactiveDec': 0.005,\n", + " },\n", + "\n", + " # Controls whether TM is enabled or disabled;\n", + " # TM is necessary for making temporal predictions, such as predicting\n", + " # the next inputs. Without TP, the model is only capable of\n", + " # reconstructing missing sensor inputs (via SP).\n", + " 'tmEnable' : True,\n", + "\n", + " 'tmParams': {\n", + " # TM diagnostic output verbosity control;\n", + " # 0: silent; [1..6]: increasing levels of verbosity\n", + " # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n", + " 'verbosity': 0,\n", + "\n", + " # Number of cell columns in the cortical region (same number for\n", + " # SP and TM)\n", + " # (see also tpNCellsPerCol)\n", + " 'columnCount': 2048,\n", + "\n", + " # The number of cells (i.e., states), allocated per column.\n", + " 'cellsPerColumn': 32,\n", + "\n", + " 'inputWidth': 2048,\n", + "\n", + " 'seed': 1960,\n", + "\n", + " # Temporal Pooler implementation selector (see _getTPClass in\n", + " # CLARegion.py).\n", + " 'temporalImp': 'cpp',\n", + "\n", + " # New Synapse formation count\n", + " # NOTE: If None, use spNumActivePerInhArea\n", + " #\n", + " # TODO: need better explanation\n", + " 'newSynapseCount': 20,\n", + "\n", + " # Maximum number of synapses per segment\n", + " # > 0 for fixed-size CLA\n", + " # -1 for non-fixed-size CLA\n", + " #\n", + " # TODO: for Ron: once the appropriate value is placed in TP\n", + " # constructor, see if we should eliminate this parameter from\n", + " # description.py.\n", + " 'maxSynapsesPerSegment': 32,\n", + "\n", + " # Maximum number of segments per cell\n", + " # > 0 for fixed-size CLA\n", + " # -1 for non-fixed-size CLA\n", + " #\n", + " # TODO: for Ron: once the appropriate value is placed in TP\n", + " # constructor, see if we should eliminate this parameter from\n", + " # description.py.\n", + " 'maxSegmentsPerCell': 128,\n", + "\n", + " # Initial Permanence\n", + " # TODO: need better explanation\n", + " 'initialPerm': 0.21,\n", + "\n", + " # Permanence Increment\n", + " 'permanenceInc': 0.1,\n", + "\n", + " # Permanence Decrement\n", + " # If set to None, will automatically default to tpPermanenceInc\n", + " # value.\n", + " 'permanenceDec' : 0.1,\n", + 
"\n", + " 'globalDecay': 0.0,\n", + "\n", + " 'maxAge': 0,\n", + "\n", + " # Minimum number of active synapses for a segment to be considered\n", + " # during search for the best-matching segments.\n", + " # None=use default\n", + " # Replaces: tpMinThreshold\n", + " 'minThreshold': 9,\n", + "\n", + " # Segment activation threshold.\n", + " # A segment is active if it has >= tpSegmentActivationThreshold\n", + " # connected synapses that are active due to infActiveState\n", + " # None=use default\n", + " # Replaces: tpActivationThreshold\n", + " 'activationThreshold': 12,\n", + "\n", + " 'outputType': 'normal',\n", + "\n", + " # \"Pay Attention Mode\" length. This tells the TM how many new\n", + " # elements to append to the end of a learned sequence at a time.\n", + " # Smaller values are better for datasets with short sequences,\n", + " # higher values are better for datasets with long sequences.\n", + " 'pamLength': 1,\n", + " },\n", + "\n", + " 'clParams': {\n", + " 'regionName' : 'SDRClassifierRegion',\n", + "\n", + " # Classifier diagnostic output verbosity control;\n", + " # 0: silent; [1..6]: increasing levels of verbosity\n", + " 'verbosity' : 0,\n", + "\n", + " # This controls how fast the classifier learns/forgets. Higher values\n", + " # make it adapt faster and forget older patterns faster.\n", + " 'alpha': 0.005,\n", + "\n", + " # This is set after the call to updateConfigFromSubConfig and is\n", + " # computed from the aggregationInfo and predictAheadTime.\n", + " 'steps': '1,5',\n", + "\n", + " 'implementation': 'cpp',\n", + " },\n", + "\n", + " 'trainSPNetOnlyIfRequested': False,\n", + " },\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Dataset Helpers" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/Users/mleborgne/_git/nupic/src/nupic/datafiles/extra/hotgym/hotgym.csv\n", + "\n", + "gym,address,timestamp,consumption\n", + "string,string,datetime,float\n", + "S,,T,\n", + "Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:00:00.0,5.3\n", + "Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:15:00.0,5.5\n", + "Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:30:00.0,5.1\n", + "Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 00:45:00.0,5.3\n", + "Balgowlah Platinum,Shop 67 197-215 Condamine Street Balgowlah 2093,2010-07-02 01:00:00.0,5.2\n" + ] + } + ], + "source": [ + "from pkg_resources import resource_filename\n", + "\n", + "datasetPath = resource_filename(\"nupic.datafiles\", \"extra/hotgym/hotgym.csv\")\n", + "print datasetPath\n", + "\n", + "with open(datasetPath) as inputFile:\n", + " print\n", + " for _ in xrange(8):\n", + " print inputFile.next().strip()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Loading Data\n", + "\n", + "`FileRecordStream` - file reader for the NuPIC file format (CSV with three header rows, understands datetimes)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street 
Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 0), 5.3]\n", + "['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 15), 5.5]\n", + "['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 30), 5.1]\n", + "['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 0, 45), 5.3]\n", + "['Balgowlah Platinum', 'Shop 67 197-215 Condamine Street Balgowlah 2093', datetime.datetime(2010, 7, 2, 1, 0), 5.2]\n" + ] + } + ], + "source": [ + "from nupic.data.file_record_stream import FileRecordStream\n", + "\n", + "def getData():\n", + " return FileRecordStream(datasetPath)\n", + "\n", + "data = getData()\n", + "for _ in xrange(5):\n", + " print data.next()" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.frameworks.opf.modelfactory import ModelFactory\n", + "model = ModelFactory.create(MODEL_PARAMS)\n", + "model.enableInference({'predictedField': 'consumption'})" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input: 5.3\n", + "prediction: 5.3\n", + "input: 5.5\n", + "prediction: 5.5\n", + "input: 5.1\n", + "prediction: 5.36\n", + "input: 5.3\n", + "prediction: 5.1\n", + "input: 5.2\n", + "prediction: 5.342\n", + "input: 5.5\n", + "prediction: 5.2994\n", + "input: 4.5\n", + "prediction: 5.35958\n", + "input: 1.2\n", + "prediction: 4.92\n", + "input: 1.1\n", + "prediction: 1.2\n", + "input: 1.2\n", + "prediction: 1.17\n", + "input: 1.2\n", + "prediction: 1.179\n", + "input: 1.2\n", + "prediction: 1.1853\n", + "input: 1.2\n", + "prediction: 1.18971\n", + "input: 1.2\n", + "prediction: 1.192797\n", + "input: 1.1\n", + "prediction: 1.1949579\n", + "input: 1.2\n", + "prediction: 1.16647053\n", + "input: 1.1\n", + "prediction: 1.176529371\n", + "input: 1.2\n", + "prediction: 1.1535705597\n", + "input: 1.2\n", + "prediction: 1.16749939179\n", + "input: 1.1\n", + "prediction: 1.17724957425\n", + "input: 1.2\n", + "prediction: 1.15407470198\n", + "input: 6.0\n", + "prediction: 1.16785229138\n", + "input: 7.9\n", + "prediction: 5.551706\n", + "input: 8.4\n", + "prediction: 6.2561942\n", + "input: 10.6\n", + "prediction: 6.89933594\n", + "input: 12.4\n", + "prediction: 10.6\n", + "input: 12.1\n", + "prediction: 12.4\n", + "input: 12.4\n", + "prediction: 12.31\n", + "input: 11.4\n", + "prediction: 12.337\n", + "input: 11.2\n", + "prediction: 10.84\n", + "input: 10.8\n", + "prediction: 10.948\n", + "input: 12.0\n", + "prediction: 10.9036\n", + "input: 11.8\n", + "prediction: 11.23252\n", + "input: 11.9\n", + "prediction: 11.402764\n", + "input: 11.4\n", + "prediction: 11.5519348\n", + "input: 11.0\n", + "prediction: 11.50635436\n", + "input: 9.8\n", + "prediction: 11.354448052\n", + "input: 9.8\n", + "prediction: 10.8881136364\n", + "input: 10.8\n", + "prediction: 10.5616795455\n", + "input: 11.1\n", + "prediction: 10.6331756818\n", + "input: 11.1\n", + "prediction: 10.7732229773\n", + "input: 11.0\n", + "prediction: 10.8712560841\n", + "input: 10.7\n", + "prediction: 10.9098792589\n", + "input: 10.6\n", + "prediction: 10.8469154812\n", + "input: 10.3\n", + "prediction: 10.7728408368\n", + "input: 10.1\n", + "prediction: 10.6309885858\n", + "input: 
12.9\n", + "prediction: 10.4716920101\n", + "input: 10.5\n", + "prediction: 10.4716920101\n", + "input: 9.7\n", + "prediction: 10.480184407\n", + "input: 9.7\n", + "prediction: 10.2461290849\n", + "input: 9.2\n", + "prediction: 10.0822903594\n", + "input: 9.2\n", + "prediction: 9.81760325161\n", + "input: 9.2\n", + "prediction: 9.63232227613\n", + "input: 9.3\n", + "prediction: 9.50262559329\n", + "input: 9.1\n", + "prediction: 9.4418379153\n", + "input: 9.0\n", + "prediction: 9.33928654071\n", + "input: 8.9\n", + "prediction: 9.2375005785\n", + "input: 9.0\n", + "prediction: 9.13625040495\n", + "input: 8.9\n", + "prediction: 9.09537528346\n", + "input: 8.9\n", + "prediction: 9.03676269843\n", + "input: 9.0\n", + "prediction: 8.9957338889\n", + "input: 9.2\n", + "prediction: 8.99701372223\n", + "input: 10.0\n", + "prediction: 9.05790960556\n", + "input: 10.7\n", + "prediction: 9.34053672389\n", + "input: 8.9\n", + "prediction: 9.74837570672\n", + "input: 9.0\n", + "prediction: 9.49386299471\n", + "input: 9.0\n", + "prediction: 9.34570409629\n", + "input: 9.3\n", + "prediction: 9.24199286741\n", + "input: 9.3\n", + "prediction: 9.25939500718\n", + "input: 9.1\n", + "prediction: 9.27157650503\n", + "input: 9.1\n", + "prediction: 9.22010355352\n", + "input: 9.1\n", + "prediction: 9.18407248746\n", + "input: 9.2\n", + "prediction: 9.15885074122\n", + "input: 9.4\n", + "prediction: 9.17119551886\n", + "input: 9.3\n", + "prediction: 9.2398368632\n", + "input: 9.3\n", + "prediction: 9.25788580424\n", + "input: 9.1\n", + "prediction: 9.27052006297\n", + "input: 9.1\n", + "prediction: 9.21936404408\n", + "input: 11.0\n", + "prediction: 9.18355483085\n", + "input: 9.0\n", + "prediction: 9.7284883816\n", + "input: 8.6\n", + "prediction: 9.50994186712\n", + "input: 3.0\n", + "prediction: 9.50994186712\n", + "input: 1.3\n", + "prediction: 4.344\n", + "input: 1.2\n", + "prediction: 1.20749660397\n", + "input: 1.3\n", + "prediction: 1.20524762278\n", + "input: 1.3\n", + "prediction: 1.23367333594\n", + "input: 1.3\n", + "prediction: 1.25357133516\n", + "input: 1.2\n", + "prediction: 1.26749993461\n", + "input: 1.3\n", + "prediction: 1.24724995423\n", + "input: 1.2\n", + "prediction: 1.26307496796\n", + "input: 1.3\n", + "prediction: 1.24415247757\n", + "input: 1.2\n", + "prediction: 1.2609067343\n", + "input: 1.3\n", + "prediction: 1.24263471401\n", + "input: 1.2\n", + "prediction: 1.25984429981\n", + "input: 1.1\n", + "prediction: 1.24189100987\n", + "input: 2.3\n", + "prediction: 1.19932370691\n", + "input: 5.5\n", + "prediction: 3.7308\n", + "input: 5.5\n", + "prediction: 6.8366746106\n", + "input: 5.8\n", + "prediction: 6.43567222742\n", + "input: 5.7\n", + "prediction: 6.24497055919\n" + ] + } + ], + "source": [ + "data = getData()\n", + "for _ in xrange(100):\n", + " record = dict(zip(data.getFieldNames(), data.next()))\n", + " print \"input: \", record[\"consumption\"]\n", + " result = model.run(record)\n", + " print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "5-step prediction: 1.19932370691\n" + ] + } + ], + "source": [ + "print \"5-step prediction: \", result.inferences[\"multiStepBestPredictions\"][5]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "# Anomaly Score" + ] + }, + { 
+ "cell_type": "code", + "execution_count": 43, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "# Model Params!\n", + "MODEL_PARAMS = {\n", + " # Type of model that the rest of these parameters apply to.\n", + " 'model': \"HTMPrediction\",\n", + "\n", + " # Version that specifies the format of the config.\n", + " 'version': 1,\n", + "\n", + " # Intermediate variables used to compute fields in modelParams and also\n", + " # referenced from the control section.\n", + " 'aggregationInfo': { 'days': 0,\n", + " 'fields': [('consumption', 'sum')],\n", + " 'hours': 1,\n", + " 'microseconds': 0,\n", + " 'milliseconds': 0,\n", + " 'minutes': 0,\n", + " 'months': 0,\n", + " 'seconds': 0,\n", + " 'weeks': 0,\n", + " 'years': 0},\n", + "\n", + " 'predictAheadTime': None,\n", + "\n", + " # Model parameter dictionary.\n", + " 'modelParams': {\n", + " # The type of inference that this model will perform\n", + " 'inferenceType': 'TemporalAnomaly',\n", + "\n", + " 'sensorParams': {\n", + " # Sensor diagnostic output verbosity control;\n", + " # if > 0: sensor region will print out on screen what it's sensing\n", + " # at each step 0: silent; >=1: some info; >=2: more info;\n", + " # >=3: even more info (see compute() in py/regions/RecordSensor.py)\n", + " 'verbosity' : 0,\n", + "\n", + " # Include the encoders we use\n", + " 'encoders': {\n", + " u'timestamp_timeOfDay': {\n", + " 'fieldname': u'timestamp',\n", + " 'name': u'timestamp_timeOfDay',\n", + " 'timeOfDay': (21, 0.5),\n", + " 'type': 'DateEncoder'},\n", + " u'timestamp_dayOfWeek': None,\n", + " u'timestamp_weekend': None,\n", + " u'consumption': {\n", + " 'clipInput': True,\n", + " 'fieldname': u'consumption',\n", + " 'maxval': 100.0,\n", + " 'minval': 0.0,\n", + " 'n': 50,\n", + " 'name': u'c1',\n", + " 'type': 'ScalarEncoder',\n", + " 'w': 21},},\n", + "\n", + " # A dictionary specifying the period for automatically-generated\n", + " # resets from a RecordSensor;\n", + " #\n", + " # None = disable automatically-generated resets (also disabled if\n", + " # all of the specified values evaluate to 0).\n", + " # Valid keys is the desired combination of the following:\n", + " # days, hours, minutes, seconds, milliseconds, microseconds, weeks\n", + " #\n", + " # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),\n", + " #\n", + " # (value generated from SENSOR_AUTO_RESET)\n", + " 'sensorAutoReset' : None,\n", + " },\n", + "\n", + " 'spEnable': True,\n", + "\n", + " 'spParams': {\n", + " # SP diagnostic output verbosity control;\n", + " # 0: silent; >=1: some info; >=2: more info;\n", + " 'spVerbosity' : 0,\n", + "\n", + " # Spatial Pooler implementation selector, see getSPClass\n", + " # in py/regions/SPRegion.py for details\n", + " # 'py' (default), 'cpp' (speed optimized, new)\n", + " 'spatialImp' : 'cpp',\n", + "\n", + " 'globalInhibition': 1,\n", + "\n", + " # Number of cell columns in the cortical region (same number for\n", + " # SP and TM)\n", + " # (see also tpNCellsPerCol)\n", + " 'columnCount': 2048,\n", + "\n", + " 'inputWidth': 0,\n", + "\n", + " # SP inhibition control (absolute value);\n", + " # Maximum number of active columns in the SP region's output (when\n", + " # there are more, the weaker ones are suppressed)\n", + " 'numActiveColumnsPerInhArea': 40,\n", + "\n", + " 'seed': 1956,\n", + "\n", + " # potentialPct\n", + " # What percent of the columns's receptive field is available\n", + " # for potential synapses. 
At initialization time, we will\n", + " # choose potentialPct * (2*potentialRadius+1)^2\n", + " 'potentialPct': 0.5,\n", + "\n", + " # The default connected threshold. Any synapse whose\n", + " # permanence value is above the connected threshold is\n", + " # a \"connected synapse\", meaning it can contribute to the\n", + " # cell's firing. Typical value is 0.10. Cells whose activity\n", + " # level before inhibition falls below minDutyCycleBeforeInh\n", + " # will have their own internal synPermConnectedCell\n", + " # threshold set below this default value.\n", + " # (This concept applies to both SP and TM and so 'cells'\n", + " # is correct here as opposed to 'columns')\n", + " 'synPermConnected': 0.1,\n", + "\n", + " 'synPermActiveInc': 0.1,\n", + "\n", + " 'synPermInactiveDec': 0.005,\n", + " },\n", + "\n", + " # Controls whether TM is enabled or disabled;\n", + " # TM is necessary for making temporal predictions, such as predicting\n", + " # the next inputs. Without TP, the model is only capable of\n", + " # reconstructing missing sensor inputs (via SP).\n", + " 'tmEnable' : True,\n", + "\n", + " 'tmParams': {\n", + " # TM diagnostic output verbosity control;\n", + " # 0: silent; [1..6]: increasing levels of verbosity\n", + " # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n", + " 'verbosity': 0,\n", + "\n", + " # Number of cell columns in the cortical region (same number for\n", + " # SP and TM)\n", + " # (see also tpNCellsPerCol)\n", + " 'columnCount': 2048,\n", + "\n", + " # The number of cells (i.e., states), allocated per column.\n", + " 'cellsPerColumn': 32,\n", + "\n", + " 'inputWidth': 2048,\n", + "\n", + " 'seed': 1960,\n", + "\n", + " # Temporal Pooler implementation selector (see _getTPClass in\n", + " # CLARegion.py).\n", + " 'temporalImp': 'cpp',\n", + "\n", + " # New Synapse formation count\n", + " # NOTE: If None, use spNumActivePerInhArea\n", + " #\n", + " # TODO: need better explanation\n", + " 'newSynapseCount': 20,\n", + "\n", + " # Maximum number of synapses per segment\n", + " # > 0 for fixed-size CLA\n", + " # -1 for non-fixed-size CLA\n", + " #\n", + " # TODO: for Ron: once the appropriate value is placed in TP\n", + " # constructor, see if we should eliminate this parameter from\n", + " # description.py.\n", + " 'maxSynapsesPerSegment': 32,\n", + "\n", + " # Maximum number of segments per cell\n", + " # > 0 for fixed-size CLA\n", + " # -1 for non-fixed-size CLA\n", + " #\n", + " # TODO: for Ron: once the appropriate value is placed in TP\n", + " # constructor, see if we should eliminate this parameter from\n", + " # description.py.\n", + " 'maxSegmentsPerCell': 128,\n", + "\n", + " # Initial Permanence\n", + " # TODO: need better explanation\n", + " 'initialPerm': 0.21,\n", + "\n", + " # Permanence Increment\n", + " 'permanenceInc': 0.1,\n", + "\n", + " # Permanence Decrement\n", + " # If set to None, will automatically default to tpPermanenceInc\n", + " # value.\n", + " 'permanenceDec' : 0.1,\n", + "\n", + " 'globalDecay': 0.0,\n", + "\n", + " 'maxAge': 0,\n", + "\n", + " # Minimum number of active synapses for a segment to be considered\n", + " # during search for the best-matching segments.\n", + " # None=use default\n", + " # Replaces: tpMinThreshold\n", + " 'minThreshold': 9,\n", + "\n", + " # Segment activation threshold.\n", + " # A segment is active if it has >= tpSegmentActivationThreshold\n", + " # connected synapses that are active due to infActiveState\n", + " # None=use default\n", + " # Replaces: 
tpActivationThreshold\n", + " 'activationThreshold': 12,\n", + "\n", + " 'outputType': 'normal',\n", + "\n", + " # \"Pay Attention Mode\" length. This tells the TM how many new\n", + " # elements to append to the end of a learned sequence at a time.\n", + " # Smaller values are better for datasets with short sequences,\n", + " # higher values are better for datasets with long sequences.\n", + " 'pamLength': 1,\n", + " },\n", + "\n", + " 'clParams': {\n", + " 'regionName' : 'SDRClassifierRegion',\n", + "\n", + " # Classifier diagnostic output verbosity control;\n", + " # 0: silent; [1..6]: increasing levels of verbosity\n", + " 'verbosity' : 0,\n", + "\n", + " # This controls how fast the classifier learns/forgets. Higher values\n", + " # make it adapt faster and forget older patterns faster.\n", + " 'alpha': 0.005,\n", + "\n", + " # This is set after the call to updateConfigFromSubConfig and is\n", + " # computed from the aggregationInfo and predictAheadTime.\n", + " 'steps': '1',\n", + "\n", + " 'implementation': 'cpp',\n", + " },\n", + "\n", + " 'anomalyParams': {\n", + " u'anomalyCacheRecords': None,\n", + " u'autoDetectThreshold': None,\n", + " u'autoDetectWaitRecords': 2184\n", + " },\n", + "\n", + " 'trainSPNetOnlyIfRequested': False,\n", + " },\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "from nupic.frameworks.opf.modelfactory import ModelFactory\n", + "model = ModelFactory.create(MODEL_PARAMS)\n", + "model.enableInference({'predictedField': 'consumption'})" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "input: 5.3\n", + "prediction: 5.3\n", + "input: 5.5\n", + "prediction: 5.5\n", + "input: 5.1\n", + "prediction: 5.36\n", + "input: 5.3\n", + "prediction: 5.1\n", + "input: 5.2\n", + "prediction: 5.342\n" + ] + } + ], + "source": [ + "data = getData()\n", + "for _ in xrange(5):\n", + " record = dict(zip(data.getFieldNames(), data.next()))\n", + " print \"input: \", record[\"consumption\"]\n", + " result = model.run(record)\n", + " print \"prediction: \", result.inferences[\"multiStepBestPredictions\"][1]" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ModelResult(\tpredictionNumber=4\n", + "\trawInput={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n", + "\tsensorInput=SensorInput(\tdataRow=(5.2, 1.0)\n", + "\tdataDict={'timestamp': datetime.datetime(2010, 7, 2, 1, 0), 'gym': 'Balgowlah Platinum', 'consumption': 5.2, 'address': 'Shop 67 197-215 Condamine Street Balgowlah 2093'}\n", + "\tdataEncodings=[array([ 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", + " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32), array([ 0., 0., 0., ..., 0., 0., 0.], dtype=float32)]\n", + "\tsequenceReset=0.0\n", + "\tcategory=-1\n", + ")\n", + "\tinferences={'multiStepPredictions': {1: {5.1: 0.0088801263517415546, 5.2: 0.010775254623541418, 5.341999999999999: 
0.98034461902471692}}, 'multiStepBucketLikelihoods': {1: {1: 0.0088801263517415546, 2: 0.98034461902471692}}, 'multiStepBestPredictions': {1: 5.341999999999999}, 'anomalyLabel': '[]', 'anomalyScore': 0.40000001}\n", + "\tmetrics=None\n", + "\tpredictedFieldIdx=0\n", + "\tpredictedFieldName=consumption\n", + "\tclassifierInput=ClassifierInput(\tdataRow=5.2\n", + "\tbucketIndex=2\n", + ")\n", + ")\n" + ] + } + ], + "source": [ + "print result" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "anomaly score: 0.4\n" + ] + } + ], + "source": [ + "print \"anomaly score: \", result.inferences[\"anomalyScore\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "__See Subutai's talk for more info on anomaly detection!__\n", + "\n", + "# Built-in OPF Clients\n", + "\n", + "`python examples/opf/bin/OpfRunExperiment.py examples/opf/experiments/multistep/hotgym/`\n", + "\n", + "Outputs `examples/opf/experiments/multistep/hotgym/inference/DefaultTask.TemporalMultiStep.predictionLog.csv`\n", + "\n", + "`python bin/run_swarm.py examples/opf/experiments/multistep/hotgym/permutations.py`\n", + "\n", + "Outputs `examples/opf/experiments/multistep/hotgym/model_0/description.py`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], + "source": [ + "" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2.0 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.10" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/examples/NuPIC Walkthrough.ipynb b/examples/NuPIC Walkthrough.ipynb index ccb3ecf38b..5942fae3f6 100644 --- a/examples/NuPIC Walkthrough.ipynb +++ b/examples/NuPIC Walkthrough.ipynb @@ -2,7 +2,10 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Encoders\n", "\n", @@ -16,7 +19,9 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -27,7 +32,9 @@ "cell_type": "code", "execution_count": 2, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -40,7 +47,9 @@ "cell_type": "code", "execution_count": 3, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -67,7 +76,9 @@ "cell_type": "code", "execution_count": 4, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -90,7 +101,9 @@ "cell_type": "code", "execution_count": 5, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -103,7 +116,9 @@ "cell_type": "code", "execution_count": 6, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -135,7 +150,9 @@ "cell_type": 
"code", "execution_count": 7, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -149,7 +166,9 @@ "cell_type": "code", "execution_count": 8, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -177,7 +196,9 @@ "cell_type": "code", "execution_count": 9, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -210,7 +231,9 @@ "cell_type": "code", "execution_count": 10, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -229,7 +252,9 @@ "cell_type": "code", "execution_count": 11, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -248,7 +273,9 @@ "cell_type": "code", "execution_count": 12, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -267,7 +294,9 @@ "cell_type": "code", "execution_count": 13, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -285,7 +314,10 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Spatial Pooler" ] @@ -294,7 +326,9 @@ "cell_type": "code", "execution_count": 14, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -318,7 +352,9 @@ "cell_type": "code", "execution_count": 15, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -337,7 +373,9 @@ "cell_type": "code", "execution_count": 16, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -356,7 +394,9 @@ "cell_type": "code", "execution_count": 17, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -375,7 +415,9 @@ "cell_type": "code", "execution_count": 18, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -394,7 +436,9 @@ "cell_type": "code", "execution_count": 19, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -413,7 +457,9 @@ "cell_type": "code", "execution_count": 20, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -432,7 +478,9 @@ "cell_type": "code", "execution_count": 21, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -451,7 +499,30 @@ "cell_type": "code", "execution_count": 22, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print SpatialPooler" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -470,19 +541,21 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", 
"text": [ - "[0 1 1 0 0 1 0 0 0 1 1 1 1 0 0]\n", - "[1 1 1 1 1 0 1 0 1 1 1 0 0 1 0]\n", - "[0 1 0 1 1 0 1 0 1 1 0 0 1 0 1]\n", - "[0 1 1 1 0 1 0 0 0 1 1 1 1 1 0]\n" + "[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n", + "[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n" ] } ], @@ -503,16 +576,18 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 25, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[0 0 0 1]\n" + "[1 0 0 0]\n" ] } ], @@ -524,9 +599,11 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 26, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -536,19 +613,21 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 27, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[0 1 1 0 0 1 0 0 0 1 1 1 1 0 0]\n", - "[1 1 1 1 1 0 1 0 1 1 1 0 0 1 0]\n", - "[0 1 0 1 1 0 1 0 1 1 0 0 1 0 1]\n", - "[0 0 1 1 1 1 0 0 0 1 1 1 1 0 0]\n" + "[0 0 1 1 1 1 0 0 0 0 1 1 1 1 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[1 1 0 0 0 0 0 1 1 1 1 1 1 0 0]\n", + "[1 1 0 1 1 0 0 1 1 0 1 0 0 1 1]\n" ] } ], @@ -561,9 +640,11 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 28, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -576,19 +657,21 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 29, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[0 1 1 0 0 1 0 0 0 1 1 1 1 0 0]\n", - "[1 1 1 1 1 0 1 0 1 1 1 0 1 1 0]\n", - "[0 0 0 0 0 0 1 1 1 0 0 0 1 1 1]\n", - "[0 0 0 1 1 1 0 0 0 1 1 1 0 0 0]\n" + "[0 0 0 1 1 1 0 0 0 0 0 0 0 0 0]\n", + "[1 0 0 0 1 1 1 1 0 1 0 0 0 1 1]\n", + "[0 0 0 0 0 0 0 0 0 1 1 1 0 0 0]\n", + "[0 0 0 0 0 0 1 1 1 0 0 0 1 1 1]\n" ] } ], @@ -601,9 +684,11 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 30, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -625,9 +710,11 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 31, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -645,57 +732,66 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Temporal Memory (a.k.a. Sequence Memory, Temporal Pooler)\n", "\n", - "From: `examples/tp/hello_tm.py`" + "From: `examples/tm/hello_tm.py`" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 32, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ - "from nupic.research.TP import TP\n", + "from nupic.research.BacktrackingTM import BacktrackingTM\n", "\n", - "TP?" + "BacktrackingTM?" 
] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 33, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ "# Step 1: create Temporal Pooler instance with appropriate parameters\n", - "tp = TP(numberOfCols=50, cellsPerColumn=2,\n", - " initialPerm=0.5, connectedPerm=0.5,\n", - " minThreshold=10, newSynapseCount=10,\n", - " permanenceInc=0.1, permanenceDec=0.0,\n", - " activationThreshold=8,\n", - " globalDecay=0, burnIn=1,\n", - " checkSynapseConsistency=False,\n", - " pamLength=10)" + "tm = BacktrackingTM(numberOfCols=50, cellsPerColumn=2,\n", + " initialPerm=0.5, connectedPerm=0.5,\n", + " minThreshold=10, newSynapseCount=10,\n", + " permanenceInc=0.1, permanenceDec=0.0,\n", + " activationThreshold=8,\n", + " globalDecay=0, burnIn=1,\n", + " checkSynapseConsistency=False,\n", + " pamLength=10)" ] }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 34, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ - "# Step 2: create input vectors to feed to the temporal pooler. Each input vector\n", + "# Step 2: create input vectors to feed to the temporal memory. Each input vector\n", "# must be numberOfCols wide. Here we create a simple sequence of 5 vectors\n", "# representing the sequence A -> B -> C -> D -> E\n", - "x = numpy.zeros((5, tp.numberOfCols), dtype=\"uint32\")\n", + "x = numpy.zeros((5, tm.numberOfCols), dtype=\"uint32\")\n", "x[0,0:10] = 1 # Input SDR representing \"A\", corresponding to columns 0-9\n", "x[1,10:20] = 1 # Input SDR representing \"B\", corresponding to columns 10-19\n", "x[2,20:30] = 1 # Input SDR representing \"C\", corresponding to columns 20-29\n", @@ -705,13 +801,15 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 35, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ - "# Step 3: send this simple sequence to the temporal pooler for learning\n", + "# Step 3: send this simple sequence to the temporal memory for learning\n", "# We repeat the sequence 10 times\n", "for i in range(10):\n", "\n", @@ -721,25 +819,27 @@ " # The compute method performs one step of learning and/or inference. Note:\n", " # here we just perform learning but you can perform prediction/inference and\n", " # learning in the same step if you want (online learning).\n", - " tp.compute(x[j], enableLearn = True, computeInfOutput = False)\n", + " tm.compute(x[j], enableLearn = True, computeInfOutput = False)\n", "\n", " # This function prints the segments associated with every cell.\n", " # If you really want to understand the TP, uncomment this line. By following\n", " # every step you can get an excellent understanding for exactly how the TP\n", " # learns.\n", - " #tp.printCells()\n", + " #tm.printCells()\n", "\n", - " # The reset command tells the TP that a sequence just ended and essentially\n", + " # The reset command tells the TM that a sequence just ended and essentially\n", " # zeros out all the states.
It is not strictly necessary but it's a bit\n", - " # messier without resets, and the TP learns quicker with resets.\n", - " tp.reset()" + " # messier without resets, and the TM learns quicker with resets.\n", + " tm.reset()" ] }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 36, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -762,7 +862,7 @@ "0000000000 1111111111 0000000000 0000000000 0000000000 \n", "\n", "\n", - "The following columns are predicted by the temporal pooler. This\n", + "The following columns are predicted by the temporal memory. This\n", "should correspond to columns in the *next* item in the sequence.\n", "[10 11 12 13 14 15 16 17 18 19] \n", "\n", @@ -781,7 +881,7 @@ "0000000000 0000000000 1111111111 0000000000 0000000000 \n", "\n", "\n", - "The following columns are predicted by the temporal pooler. This\n", + "The following columns are predicted by the temporal memory. This\n", "should correspond to columns in the *next* item in the sequence.\n", "[20 21 22 23 24 25 26 27 28 29] \n", "\n", @@ -800,7 +900,7 @@ "0000000000 0000000000 0000000000 1111111111 0000000000 \n", "\n", "\n", - "The following columns are predicted by the temporal pooler. This\n", + "The following columns are predicted by the temporal memory. This\n", "should correspond to columns in the *next* item in the sequence.\n", "[30 31 32 33 34 35 36 37 38 39] \n", "\n", @@ -819,7 +919,7 @@ "0000000000 0000000000 0000000000 0000000000 1111111111 \n", "\n", "\n", - "The following columns are predicted by the temporal pooler. This\n", + "The following columns are predicted by the temporal memory. This\n", "should correspond to columns in the *next* item in the sequence.\n", "[40 41 42 43 44 45 46 47 48 49] \n", "\n", @@ -838,7 +938,7 @@ "0000000000 0000000000 0000000000 0000000000 0000000000 \n", "\n", "\n", - "The following columns are predicted by the temporal pooler. This\n", + "The following columns are predicted by the temporal memory. This\n", "should correspond to columns in the *next* item in the sequence.\n", "[] \n" ] @@ -846,7 +946,7 @@ ], "source": [ "# Step 4: send the same sequence of vectors and look at predictions made by\n", - "# temporal pooler\n", + "# temporal memory\n", "\n", "# Utility routine for printing the input vector\n", "def formatRow(x):\n", @@ -863,7 +963,7 @@ " print \"Raw input vector\\n\",formatRow(x[j])\n", "\n", " # Send each vector to the TP, with learning turned off\n", - " tp.compute(x[j], enableLearn=False, computeInfOutput=True)\n", + " tm.compute(x[j], enableLearn=False, computeInfOutput=True)\n", "\n", " # This method prints out the active state of each cell followed by the\n", " # predicted state of each cell. For convenience the cells are grouped\n", @@ -874,22 +974,25 @@ " # represent the SDR for the current input pattern and the columns where\n", " # predicted state is 1 represent the SDR for the next expected pattern\n", " print \"\\nAll the active and predicted cells:\"\n", - " tp.printStates(printPrevious=False, printLearnState=False)\n", + " tm.printStates(printPrevious=False, printLearnState=False)\n", "\n", - " # tp.getPredictedState() gets the predicted cells.\n", + " # tm.getPredictedState() gets the predicted cells.\n", " # predictedCells[c][i] represents the state of the i'th cell in the c'th\n", " # column. To see if a column is predicted, we can simply take the OR\n", " # across all the cells in that column. 
In numpy we can do this by taking\n", " # the max along axis 1.\n", - " print \"\\n\\nThe following columns are predicted by the temporal pooler. This\"\n", + " print \"\\n\\nThe following columns are predicted by the temporal memory. This\"\n", " print \"should correspond to columns in the *next* item in the sequence.\"\n", - " predictedCells = tp.getPredictedState()\n", + " predictedCells = tm.getPredictedState()\n", " print formatRow(predictedCells.max(axis=1).nonzero())" ] }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Networks and Regions\n", "\n", @@ -898,7 +1001,10 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Online Prediction Framework\n", "\n", @@ -913,7 +1019,10 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Model Parameters\n", "\n", @@ -922,16 +1031,18 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 37, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ "# Model Params!\n", "MODEL_PARAMS = {\n", " # Type of model that the rest of these parameters apply to.\n", - " 'model': \"CLA\",\n", + " 'model': \"HTMPrediction\",\n", "\n", " # Version that specifies the format of the config.\n", " 'version': 1,\n", @@ -1014,7 +1125,7 @@ " 'globalInhibition': 1,\n", "\n", " # Number of cell columns in the cortical region (same number for\n", - " # SP and TP)\n", + " # SP and TM)\n", " # (see also tpNCellsPerCol)\n", " 'columnCount': 2048,\n", "\n", @@ -1040,7 +1151,7 @@ " # level before inhibition falls below minDutyCycleBeforeInh\n", " # will have their own internal synPermConnectedCell\n", " # threshold set below this default value.\n", - " # (This concept applies to both SP and TP and so 'cells'\n", + " # (This concept applies to both SP and TM and so 'cells'\n", " # is correct here as opposed to 'columns')\n", " 'synPermConnected': 0.1,\n", "\n", @@ -1049,20 +1160,20 @@ " 'synPermInactiveDec': 0.005,\n", " },\n", "\n", - " # Controls whether TP is enabled or disabled;\n", - " # TP is necessary for making temporal predictions, such as predicting\n", + " # Controls whether TM is enabled or disabled;\n", + " # TM is necessary for making temporal predictions, such as predicting\n", " # the next inputs. Without TP, the model is only capable of\n", " # reconstructing missing sensor inputs (via SP).\n", " 'tmEnable' : True,\n", "\n", " 'tmParams': {\n", - " # TP diagnostic output verbosity control;\n", + " # TM diagnostic output verbosity control;\n", " # 0: silent; [1..6]: increasing levels of verbosity\n", - " # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)\n", + " # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py)\n", + " 'verbosity': 0,\n", "\n", " # Number of cell columns in the cortical region (same number for\n", - " # SP and TP)\n", + " # SP and TM)\n", " # (see also tpNCellsPerCol)\n", " 'columnCount': 2048,\n", "\n", @@ -1132,7 +1243,7 @@ "\n", " 'outputType': 'normal',\n", "\n", - " # \"Pay Attention Mode\" length. This tells the TP how many new\n", + " # \"Pay Attention Mode\" length. 
This tells the TM how many new\n", " # elements to append to the end of a learned sequence at a time.\n", " # Smaller values are better for datasets with short sequences,\n", " # higher values are better for datasets with long sequences.\n", @@ -1164,23 +1275,28 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Dataset Helpers" ] }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 38, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "/Users/nromano/workspace/nupic/src/nupic/datafiles/extra/hotgym/hotgym.csv\n", + "/Users/mleborgne/_git/nupic/src/nupic/datafiles/extra/hotgym/hotgym.csv\n", "\n", "gym,address,timestamp,consumption\n", "string,string,datetime,float\n", @@ -1207,7 +1323,10 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Loading Data\n", "\n", @@ -1216,9 +1335,11 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 39, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -1246,9 +1367,11 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 40, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -1259,9 +1382,11 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 41, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -1283,9 +1408,9 @@ "input: 4.5\n", "prediction: 5.35958\n", "input: 1.2\n", - "prediction: 5.35958\n", + "prediction: 4.92\n", "input: 1.1\n", - "prediction: 5.35958\n", + "prediction: 1.2\n", "input: 1.2\n", "prediction: 1.17\n", "input: 1.2\n", @@ -1313,25 +1438,25 @@ "input: 6.0\n", "prediction: 1.16785229138\n", "input: 7.9\n", - "prediction: 1.16785229138\n", + "prediction: 5.551706\n", "input: 8.4\n", - "prediction: 1.16785229138\n", + "prediction: 6.2561942\n", "input: 10.6\n", - "prediction: 1.16785229138\n", + "prediction: 6.89933594\n", "input: 12.4\n", - "prediction: 1.16785229138\n", + "prediction: 10.6\n", "input: 12.1\n", - "prediction: 1.16785229138\n", + "prediction: 12.4\n", "input: 12.4\n", - "prediction: 1.16785229138\n", + "prediction: 12.31\n", "input: 11.4\n", - "prediction: 1.16785229138\n", + "prediction: 12.337\n", "input: 11.2\n", - "prediction: 1.16785229138\n", + "prediction: 10.84\n", "input: 10.8\n", - "prediction: 1.16785229138\n", + "prediction: 10.948\n", "input: 12.0\n", - "prediction: 1.16785229138\n", + "prediction: 10.9036\n", "input: 11.8\n", "prediction: 11.23252\n", "input: 11.9\n", @@ -1339,9 +1464,9 @@ "input: 11.4\n", "prediction: 11.5519348\n", "input: 11.0\n", - "prediction: 1.16785229138\n", + "prediction: 11.50635436\n", "input: 9.8\n", - "prediction: 1.16785229138\n", + "prediction: 11.354448052\n", "input: 9.8\n", "prediction: 10.8881136364\n", "input: 10.8\n", @@ -1371,69 +1496,69 @@ "input: 9.2\n", "prediction: 10.0822903594\n", "input: 9.2\n", - "prediction: 1.16785229138\n", + "prediction: 9.81760325161\n", "input: 9.2\n", - "prediction: 1.16785229138\n", + "prediction: 9.63232227613\n", "input: 9.3\n", - "prediction: 1.16785229138\n", + "prediction: 9.50262559329\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.4418379153\n", 
"input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.33928654071\n", "input: 8.9\n", - "prediction: 1.16785229138\n", + "prediction: 9.2375005785\n", "input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.13625040495\n", "input: 8.9\n", - "prediction: 1.16785229138\n", + "prediction: 9.09537528346\n", "input: 8.9\n", - "prediction: 1.16785229138\n", + "prediction: 9.03676269843\n", "input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 8.9957338889\n", "input: 9.2\n", - "prediction: 1.16785229138\n", + "prediction: 8.99701372223\n", "input: 10.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.05790960556\n", "input: 10.7\n", - "prediction: 1.16785229138\n", + "prediction: 9.34053672389\n", "input: 8.9\n", - "prediction: 1.16785229138\n", + "prediction: 9.74837570672\n", "input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.49386299471\n", "input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.34570409629\n", "input: 9.3\n", - "prediction: 1.16785229138\n", + "prediction: 9.24199286741\n", "input: 9.3\n", - "prediction: 1.16785229138\n", + "prediction: 9.25939500718\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.27157650503\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.22010355352\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.18407248746\n", "input: 9.2\n", - "prediction: 1.16785229138\n", + "prediction: 9.15885074122\n", "input: 9.4\n", - "prediction: 1.16785229138\n", + "prediction: 9.17119551886\n", "input: 9.3\n", - "prediction: 1.16785229138\n", + "prediction: 9.2398368632\n", "input: 9.3\n", - "prediction: 1.16785229138\n", + "prediction: 9.25788580424\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.27052006297\n", "input: 9.1\n", - "prediction: 1.16785229138\n", + "prediction: 9.21936404408\n", "input: 11.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.18355483085\n", "input: 9.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.7284883816\n", "input: 8.6\n", - "prediction: 1.16785229138\n", + "prediction: 9.50994186712\n", "input: 3.0\n", - "prediction: 1.16785229138\n", + "prediction: 9.50994186712\n", "input: 1.3\n", - "prediction: 1.16785229138\n", + "prediction: 4.344\n", "input: 1.2\n", "prediction: 1.20749660397\n", "input: 1.3\n", @@ -1461,13 +1586,13 @@ "input: 2.3\n", "prediction: 1.19932370691\n", "input: 5.5\n", - "prediction: 1.19932370691\n", + "prediction: 3.7308\n", "input: 5.5\n", - "prediction: 1.19932370691\n", + "prediction: 6.8366746106\n", "input: 5.8\n", - "prediction: 1.19932370691\n", + "prediction: 6.43567222742\n", "input: 5.7\n", - "prediction: 9.50994186712\n" + "prediction: 6.24497055919\n" ] } ], @@ -1482,16 +1607,18 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 42, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "5-step prediction: 9.50994186712\n" + "5-step prediction: 1.19932370691\n" ] } ], @@ -1501,23 +1628,28 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "# Anomaly Score" ] }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 43, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ "# Model Params!\n", "MODEL_PARAMS = {\n", " # Type of model that the rest of these 
parameters apply to.\n", - " 'model': \"CLA\",\n", + " 'model': \"HTMPrediction\",\n", "\n", " # Version that specifies the format of the config.\n", " 'version': 1,\n", @@ -1597,7 +1729,7 @@ " 'globalInhibition': 1,\n", "\n", " # Number of cell columns in the cortical region (same number for\n", - " # SP and TP)\n", + " # SP and TM)\n", " # (see also tpNCellsPerCol)\n", " 'columnCount': 2048,\n", "\n", @@ -1623,7 +1755,7 @@ " # level before inhibition falls below minDutyCycleBeforeInh\n", " # will have their own internal synPermConnectedCell\n", " # threshold set below this default value.\n", - " # (This concept applies to both SP and TP and so 'cells'\n", + " # (This concept applies to both SP and TM and so 'cells'\n", " # is correct here as opposed to 'columns')\n", " 'synPermConnected': 0.1,\n", "\n", @@ -1632,20 +1764,20 @@ " 'synPermInactiveDec': 0.005,\n", " },\n", "\n", - " # Controls whether TP is enabled or disabled;\n", - " # TP is necessary for making temporal predictions, such as predicting\n", + " # Controls whether TM is enabled or disabled;\n", + " # TM is necessary for making temporal predictions, such as predicting\n", " # the next inputs. Without TP, the model is only capable of\n", " # reconstructing missing sensor inputs (via SP).\n", " 'tmEnable' : True,\n", "\n", " 'tmParams': {\n", - " # TP diagnostic output verbosity control;\n", + " # TM diagnostic output verbosity control;\n", " # 0: silent; [1..6]: increasing levels of verbosity\n", - " # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)\n", + " # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py)\n", " 'verbosity': 0,\n", "\n", " # Number of cell columns in the cortical region (same number for\n", - " # SP and TP)\n", + " # SP and TM)\n", " # (see also tpNCellsPerCol)\n", " 'columnCount': 2048,\n", "\n", @@ -1715,7 +1847,7 @@ "\n", " 'outputType': 'normal',\n", "\n", - " # \"Pay Attention Mode\" length. This tells the TP how many new\n", + " # \"Pay Attention Mode\" length. 
This tells the TM how many new\n", " # elements to append to the end of a learned sequence at a time.\n", " # Smaller values are better for datasets with short sequences,\n", " # higher values are better for datasets with long sequences.\n", @@ -1753,9 +1885,11 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 44, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -1766,9 +1900,11 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 45, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -1799,9 +1935,11 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 46, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { @@ -1819,7 +1957,7 @@ "\tsequenceReset=0.0\n", "\tcategory=-1\n", ")\n", - "\tinferences={'multiStepPredictions': {1: {5.1: 0.29314434936959172, 5.341999999999999: 0.70685565063040834}}, 'multiStepBucketLikelihoods': {1: {1: 0.29314434936959172, 2: 0.70685565063040834}}, 'multiStepBestPredictions': {1: 5.341999999999999}, 'anomalyLabel': '[]', 'anomalyScore': 0.29999999999999999}\n", + "\tinferences={'multiStepPredictions': {1: {5.1: 0.0088801263517415546, 5.2: 0.010775254623541418, 5.341999999999999: 0.98034461902471692}}, 'multiStepBucketLikelihoods': {1: {1: 0.0088801263517415546, 2: 0.98034461902471692}}, 'multiStepBestPredictions': {1: 5.341999999999999}, 'anomalyLabel': '[]', 'anomalyScore': 0.40000001}\n", "\tmetrics=None\n", "\tpredictedFieldIdx=0\n", "\tpredictedFieldName=consumption\n", @@ -1836,16 +1974,18 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 47, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "anomaly score: 0.3\n" + "anomaly score: 0.4\n" ] } ], @@ -1855,7 +1995,10 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "deletable": true, + "editable": true + }, "source": [ "__See Subutai's talk for more info on anomaly detection!__\n", "\n", @@ -1874,7 +2017,9 @@ "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "collapsed": false, + "deletable": true, + "editable": true }, "outputs": [], "source": [ @@ -1903,4 +2048,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/examples/network/core_encoders_demo.py b/examples/network/core_encoders_demo.py index 3a958c8d1b..c386758641 100644 --- a/examples/network/core_encoders_demo.py +++ b/examples/network/core_encoders_demo.py @@ -81,7 +81,7 @@ def createNetwork(): # # Add a TPRegion, a region containing a Temporal Memory # - network.addRegion("tm", "py.TPRegion", + network.addRegion("tm", "py.TMRegion", json.dumps({ "columnCount": 2048, "cellsPerColumn": 32, diff --git a/examples/network/hierarchy_network_demo.py b/examples/network/hierarchy_network_demo.py index 5242f5021f..5e881794a1 100755 --- a/examples/network/hierarchy_network_demo.py +++ b/examples/network/hierarchy_network_demo.py @@ -68,7 +68,7 @@ "boostStrength": 0.0} # Parameter dict for TPRegion -TP_PARAMS = {"verbosity": _VERBOSITY, +TM_PARAMS = {"verbosity": _VERBOSITY, "temporalImp": "cpp", "seed": _SEED, @@ -168,8 +168,8 @@ def createSpatialPooler(network, name, inputWidth): def createTemporalMemory(network, name): - temporalMemoryRegion = 
network.addRegion(name, "py.TPRegion", - json.dumps(TP_PARAMS)) + temporalMemoryRegion = network.addRegion(name, "py.TMRegion", + json.dumps(TM_PARAMS)) # Enable topDownMode to get the predicted columns output temporalMemoryRegion.setParameter("topDownMode", True) # Make sure learning is enabled (this is the default) @@ -186,7 +186,7 @@ def createTemporalMemory(network, name): def createNetwork(dataSource): """Creates and returns a new Network with a sensor region reading data from - 'dataSource'. There are two hierarchical levels, each with one SP and one TP. + 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run """ @@ -203,10 +203,10 @@ def createNetwork(dataSource): linkParams = "" network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams) - # Create and add a TP region + # Create and add a TM region l1temporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY) - # Link SP region to TP region in the feedforward direction + # Link SP region to TM region in the feedforward direction network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams) # Add a classifier @@ -254,7 +254,7 @@ def createNetwork(dataSource): def runClassifier(classifier, sensorRegion, tpRegion, recordNumber): """Calls classifier manually, not using network""" - # Obtain input, its encoding, and the tp output for classification + # Obtain input, its encoding, and the tm output for classification actualInput = float(sensorRegion.getOutputData("sourceOut")[0]) scalarEncoder = sensorRegion.getSelf().encoder.encoders[0][1] bucketIndex = scalarEncoder.getBucketIndices(actualInput)[0] diff --git a/examples/network/network_api_demo.py b/examples/network/network_api_demo.py index 25a2dda1f0..3972e9ac6e 100755 --- a/examples/network/network_api_demo.py +++ b/examples/network/network_api_demo.py @@ -30,7 +30,7 @@ from nupic.engine import Network from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder from nupic.regions.SPRegion import SPRegion -from nupic.regions.TPRegion import TPRegion +from nupic.regions.TMRegion import TMRegion _VERBOSITY = 0 # how chatty the demo should be _SEED = 1956 # the random seed used throughout @@ -57,8 +57,8 @@ "boostStrength": 0.0, } -# Config field for TPRegion -TP_PARAMS = { +# Config field for TMRegion +TM_PARAMS = { "verbosity": _VERBOSITY, "columnCount": 2048, "cellsPerColumn": 32, @@ -100,7 +100,7 @@ def createNetwork(dataSource): The network has a sensor region reading data from `dataSource` and passing the encoded representation to an SPRegion. The SPRegion output is passed to - a TPRegion. + a TMRegion. 
:param dataSource: a RecordStream instance to get data from :returns: a Network instance ready to run @@ -131,15 +131,15 @@ def createNetwork(dataSource): network.link("spatialPoolerRegion", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut", destInput="temporalTopDownIn") - # Add the TPRegion on top of the SPRegion - network.addRegion("temporalPoolerRegion", "py.TPRegion", - json.dumps(TP_PARAMS)) + # Add the TMRegion on top of the SPRegion + network.addRegion("temporalPoolerRegion", "py.TMRegion", + json.dumps(TM_PARAMS)) network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "") network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn") - # Add the AnomalyLikelihoodRegion on top of the TPRegion + # Add the AnomalyLikelihoodRegion on top of the TMRegion network.addRegion("anomalyLikelihoodRegion", "py.AnomalyLikelihoodRegion", json.dumps({})) @@ -210,7 +210,7 @@ def runNetwork(network, writer): print "# spatial pooler columns: {0}".format(sp.getNumColumns()) print - tmRegion = network.getRegionsByType(TPRegion)[0] + tmRegion = network.getRegionsByType(TMRegion)[0] tm = tmRegion.getSelf().getAlgorithmInstance() print "temporal memory region inputs: {0}".format(tmRegion.getInputNames()) print "temporal memory region outputs: {0}".format(tmRegion.getOutputNames()) diff --git a/examples/network/temporal_anomaly_network_demo.py b/examples/network/temporal_anomaly_network_demo.py index 5d49adb6db..d982328870 100644 --- a/examples/network/temporal_anomaly_network_demo.py +++ b/examples/network/temporal_anomaly_network_demo.py @@ -47,7 +47,7 @@ } # Default config fields for TPRegion -_TP_PARAMS = { +_TM_PARAMS = { "verbosity": _VERBOSITY, "columnCount": 2048, "cellsPerColumn": 32, @@ -77,16 +77,16 @@ def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS, - temporalParams=_TP_PARAMS, + temporalParams=_TM_PARAMS, verbosity=_VERBOSITY): - """Generates a Network with connected RecordSensor, SP, TP. + """Generates a Network with connected RecordSensor, SP, TM. This function takes care of generating regions and the canonical links. The network has a sensor region reading data from a specified input and passing the encoded representation to an SPRegion. - The SPRegion output is passed to a TPRegion. + The SPRegion output is passed to a TMRegion. Note: this function returns a network that needs to be initialized. This allows the user to extend the network by adding further regions and @@ -94,7 +94,7 @@ def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS, :param recordParams: a dict with parameters for creating RecordSensor region. :param spatialParams: a dict with parameters for creating SPRegion. - :param temporalParams: a dict with parameters for creating TPRegion. + :param temporalParams: a dict with parameters for creating TMRegion. :param verbosity: an integer representing how chatty the network will be. 
""" inputFilePath = recordParams["inputFilePath"] @@ -132,7 +132,7 @@ def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS, srcOutput="temporalTopDownOut", destInput="temporalTopDownIn") # Add the TPRegion on top of the SPRegion - network.addRegion("temporalPoolerRegion", "py.TPRegion", + network.addRegion("temporalPoolerRegion", "py.TMRegion", json.dumps(temporalParams)) network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "") diff --git a/examples/opf/clients/cpu/model_params.py b/examples/opf/clients/cpu/model_params.py index ea8c39f031..3741b083c9 100644 --- a/examples/opf/clients/cpu/model_params.py +++ b/examples/opf/clients/cpu/model_params.py @@ -75,7 +75,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -101,7 +101,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -110,20 +110,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -148,7 +148,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -157,7 +157,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -193,7 +193,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
diff --git a/examples/opf/clients/hotgym/anomaly/model_params.py b/examples/opf/clients/hotgym/anomaly/model_params.py index 6b80174f2a..2c824be775 100644 --- a/examples/opf/clients/hotgym/anomaly/model_params.py +++ b/examples/opf/clients/hotgym/anomaly/model_params.py @@ -105,7 +105,7 @@ 'globalInhibition': 1, - # Number of columns in the SP (must be same as in TP) + # Number of columns in the SP (must be same as in TM) 'columnCount': 2048, 'inputWidth': 0, @@ -138,20 +138,20 @@ 'boostStrength': 0.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -176,7 +176,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -185,7 +185,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -221,7 +221,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/clients/hotgym/simple/model_params.py b/examples/opf/clients/hotgym/simple/model_params.py index b3ad116ef7..7466c83242 100644 --- a/examples/opf/clients/hotgym/simple/model_params.py +++ b/examples/opf/clients/hotgym/simple/model_params.py @@ -101,7 +101,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -135,20 +135,20 @@ 'boostStrength': 3.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -173,7 +173,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -182,7 +182,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -217,7 +217,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/10field_few2_skewed/description.py b/examples/opf/experiments/anomaly/spatial/10field_few2_skewed/description.py index bd7775d3a4..b24b96a542 100644 --- a/examples/opf/experiments/anomaly/spatial/10field_few2_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/10field_few2_skewed/description.py @@ -156,7 +156,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -238,7 +238,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -274,7 +274,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py b/examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py index bd7775d3a4..b24b96a542 100644 --- a/examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py @@ -156,7 +156,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -238,7 +238,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -274,7 +274,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py b/examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py index cc3786257d..6ec6865e2b 100644 --- a/examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/10field_many_skewed/description.py @@ -147,7 +147,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -173,7 +173,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -182,20 +182,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -220,7 +220,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -265,7 +265,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/1field_few_balanced/description.py b/examples/opf/experiments/anomaly/spatial/1field_few_balanced/description.py index 1945409ec2..e41e8afd55 100644 --- a/examples/opf/experiments/anomaly/spatial/1field_few_balanced/description.py +++ b/examples/opf/experiments/anomaly/spatial/1field_few_balanced/description.py @@ -147,7 +147,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -173,7 +173,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -182,20 +182,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -220,7 +220,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -265,7 +265,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/1field_few_skewed/description.py b/examples/opf/experiments/anomaly/spatial/1field_few_skewed/description.py index cc3786257d..6ec6865e2b 100644 --- a/examples/opf/experiments/anomaly/spatial/1field_few_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/1field_few_skewed/description.py @@ -147,7 +147,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -173,7 +173,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -182,20 +182,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -220,7 +220,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -265,7 +265,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py b/examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py index e6356fd657..b95f0b7481 100644 --- a/examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py +++ b/examples/opf/experiments/anomaly/spatial/2field_few_6040/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2field_few_balanced/description.py b/examples/opf/experiments/anomaly/spatial/2field_few_balanced/description.py index fd3158f709..ab0ecab14e 100644 --- a/examples/opf/experiments/anomaly/spatial/2field_few_balanced/description.py +++ b/examples/opf/experiments/anomaly/spatial/2field_few_balanced/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py b/examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py index e6356fd657..b95f0b7481 100644 --- a/examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/2field_few_skewed/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py b/examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py index e6356fd657..b95f0b7481 100644 --- a/examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py +++ b/examples/opf/experiments/anomaly/spatial/2field_many_balanced/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py b/examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py index e6356fd657..b95f0b7481 100644 --- a/examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py +++ b/examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py b/examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py index e6356fd657..b95f0b7481 100644 --- a/examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py +++ b/examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/novel_combination/description.py b/examples/opf/experiments/anomaly/spatial/novel_combination/description.py index 490d82f663..8538a6d7fc 100644 --- a/examples/opf/experiments/anomaly/spatial/novel_combination/description.py +++ b/examples/opf/experiments/anomaly/spatial/novel_combination/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.3, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : False, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/spatial/simple/description.py b/examples/opf/experiments/anomaly/spatial/simple/description.py index c2135096d6..742d2518e0 100644 --- a/examples/opf/experiments/anomaly/spatial/simple/description.py +++ b/examples/opf/experiments/anomaly/spatial/simple/description.py @@ -148,7 +148,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -174,7 +174,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -183,20 +183,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -230,7 +230,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -266,7 +266,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/temporal/hotgym/description.py b/examples/opf/experiments/anomaly/temporal/hotgym/description.py index 5538c3a2e7..3677e4fc96 100644 --- a/examples/opf/experiments/anomaly/temporal/hotgym/description.py +++ b/examples/opf/experiments/anomaly/temporal/hotgym/description.py @@ -168,7 +168,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -194,7 +194,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -203,20 +203,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -241,7 +241,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -250,7 +250,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -286,7 +286,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/temporal/saw_200/description.py b/examples/opf/experiments/anomaly/temporal/saw_200/description.py index 280ee6b2a7..0729671ce2 100644 --- a/examples/opf/experiments/anomaly/temporal/saw_200/description.py +++ b/examples/opf/experiments/anomaly/temporal/saw_200/description.py @@ -158,7 +158,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -184,7 +184,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -193,20 +193,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -231,7 +231,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -240,7 +240,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -276,7 +276,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/temporal/saw_big/description.py b/examples/opf/experiments/anomaly/temporal/saw_big/description.py index cc17fc8f67..b1ec35d2fb 100644 --- a/examples/opf/experiments/anomaly/temporal/saw_big/description.py +++ b/examples/opf/experiments/anomaly/temporal/saw_big/description.py @@ -158,7 +158,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -184,7 +184,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -193,20 +193,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -231,7 +231,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -240,7 +240,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -276,7 +276,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/anomaly/temporal/simple/description.py b/examples/opf/experiments/anomaly/temporal/simple/description.py index 5bb9968fc4..2fa9da9213 100644 --- a/examples/opf/experiments/anomaly/temporal/simple/description.py +++ b/examples/opf/experiments/anomaly/temporal/simple/description.py @@ -162,7 +162,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -188,7 +188,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -202,20 +202,20 @@ 'boostStrength': 0.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable': True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -240,7 +240,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -249,7 +249,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -285,7 +285,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/classification/base_category/UNDER_DEVELOPMENT b/examples/opf/experiments/classification/base_category/UNDER_DEVELOPMENT index 1ec8c6a032..900ecda6d0 100644 --- a/examples/opf/experiments/classification/base_category/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/classification/base_category/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/classification/base_category/description.py b/examples/opf/experiments/classification/base_category/description.py index 1332e14f25..1bbc172fd1 100644 --- a/examples/opf/experiments/classification/base_category/description.py +++ b/examples/opf/experiments/classification/base_category/description.py @@ -157,7 +157,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -183,7 +183,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -197,20 +197,20 @@ 'boostStrength': 0.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -235,7 +235,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -244,7 +244,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -280,7 +280,7 @@ 'outputType': 'activeState', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
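Taken together, these hunks all converge on the same renamed tmParams block in each description.py. The following is a minimal sketch of that block assembled from the comments and values visible in the diffs above; the pamLength value is not shown in these hunks, so the value here is illustrative only.

config = {
    'modelParams': {
        # Controls whether TM is enabled; without TM the model can only
        # reconstruct missing sensor inputs (via SP).
        'tmEnable': True,
        'tmParams': {
            # TM diagnostic output verbosity: 0 silent, 1..6 increasing
            'verbosity': 0,
            # Cell columns in the cortical region (same count for SP and TM)
            'columnCount': 2048,
            # > 0 for fixed-size CLA, -1 for non-fixed-size CLA
            'maxSynapsesPerSegment': 32,
            'maxSegmentsPerCell': 128,
            'outputType': 'normal',
            # "Pay Attention Mode" length: how many new elements the TM
            # appends to a learned sequence at a time (value illustrative,
            # not visible in these hunks)
            'pamLength': 1,
        },
    },
}

Note that the same columnCount (2048) appears under both spParams and tmParams because, as the comments say, SP and TM use the same number of columns.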
@@ -331,7 +331,7 @@ # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) -# With no TP, there are no columns +# With no TM, there are no columns if not config['modelParams']['tmEnable']: config['modelParams']['clParams']['cellsPerCol'] = 0 diff --git a/examples/opf/experiments/classification/base_scalar/UNDER_DEVELOPMENT b/examples/opf/experiments/classification/base_scalar/UNDER_DEVELOPMENT index 1ec8c6a032..900ecda6d0 100644 --- a/examples/opf/experiments/classification/base_scalar/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/classification/base_scalar/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/classification/base_scalar/description.py b/examples/opf/experiments/classification/base_scalar/description.py index 7fc4692e27..d602bfc85e 100644 --- a/examples/opf/experiments/classification/base_scalar/description.py +++ b/examples/opf/experiments/classification/base_scalar/description.py @@ -159,7 +159,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -185,7 +185,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -199,20 +199,20 @@ 'boostStrength': 0.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs.
Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -237,7 +237,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -246,7 +246,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -282,7 +282,7 @@ 'outputType': 'activeState', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. @@ -333,7 +333,7 @@ # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) -# With no TP, there are no columns +# With no TM, there are no columns if not config['modelParams']['tmEnable']: config['modelParams']['clParams']['cellsPerCol'] = 0 diff --git a/examples/opf/experiments/classification/category_TP_0/description.py b/examples/opf/experiments/classification/category_TM_0/description.py similarity index 99% rename from examples/opf/experiments/classification/category_TP_0/description.py rename to examples/opf/experiments/classification/category_TM_0/description.py index b97fb0417e..882cd9a86e 100644 --- a/examples/opf/experiments/classification/category_TP_0/description.py +++ b/examples/opf/experiments/classification/category_TM_0/description.py @@ -28,7 +28,7 @@ config = \ { 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/category_TP_0.csv'), + '../datasets/category_TM_0.csv'), 'modelParams': { 'clParams': { 'verbosity': 0}, 'sensorParams': { 'encoders': { }, 'verbosity': 0}, 'spParams': { }, diff --git a/examples/opf/experiments/classification/category_TP_1/description.py b/examples/opf/experiments/classification/category_TM_1/description.py similarity index 99% rename from examples/opf/experiments/classification/category_TP_1/description.py rename to examples/opf/experiments/classification/category_TM_1/description.py index dfc770dc53..5f89088908 100644 --- a/examples/opf/experiments/classification/category_TP_1/description.py +++ b/examples/opf/experiments/classification/category_TM_1/description.py @@ -28,7 +28,7 @@ config = \ { 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/category_TP_1.csv'), + '../datasets/category_TM_1.csv'), 
'modelParams': { 'clParams': { 'verbosity': 0}, 'sensorParams': { 'encoders': { }, 'verbosity': 0}, 'spParams': { 'spVerbosity': 0}, diff --git a/examples/opf/experiments/classification/datasets/category_TP_0.csv b/examples/opf/experiments/classification/datasets/category_TM_0.csv similarity index 100% rename from examples/opf/experiments/classification/datasets/category_TP_0.csv rename to examples/opf/experiments/classification/datasets/category_TM_0.csv diff --git a/examples/opf/experiments/classification/datasets/category_TP_1.csv b/examples/opf/experiments/classification/datasets/category_TM_1.csv similarity index 100% rename from examples/opf/experiments/classification/datasets/category_TP_1.csv rename to examples/opf/experiments/classification/datasets/category_TM_1.csv diff --git a/examples/opf/experiments/classification/datasets/scalar_TP_0.csv b/examples/opf/experiments/classification/datasets/scalar_TM_0.csv similarity index 100% rename from examples/opf/experiments/classification/datasets/scalar_TP_0.csv rename to examples/opf/experiments/classification/datasets/scalar_TM_0.csv diff --git a/examples/opf/experiments/classification/datasets/scalar_TP_1.csv b/examples/opf/experiments/classification/datasets/scalar_TM_1.csv similarity index 100% rename from examples/opf/experiments/classification/datasets/scalar_TP_1.csv rename to examples/opf/experiments/classification/datasets/scalar_TM_1.csv diff --git a/examples/opf/experiments/classification/makeDatasets.py b/examples/opf/experiments/classification/makeDatasets.py index 272689fb29..edd18e4dbe 100644 --- a/examples/opf/experiments/classification/makeDatasets.py +++ b/examples/opf/experiments/classification/makeDatasets.py @@ -231,10 +231,10 @@ def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq= _generateCategory('category_SP_1.csv', numSequences=50, elementsPerSeq=1, numRepeats=20) - _generateCategory('category_TP_0.csv', numSequences=2, elementsPerSeq=5, + _generateCategory('category_TM_0.csv', numSequences=2, elementsPerSeq=5, numRepeats=30) - _generateCategory('category_TP_1.csv', numSequences=10, elementsPerSeq=5, + _generateCategory('category_TM_1.csv', numSequences=10, elementsPerSeq=5, numRepeats=20) _generateOverlapping('category_hub_TP_0.csv', numSequences=10, elementsPerSeq=5, @@ -246,10 +246,10 @@ def _generateOverlapping(filename="overlap.csv", numSequences=2, elementsPerSeq= _generateScalar('scalar_SP_0.csv', numSequences=2, elementsPerSeq=1, numRepeats=20, stepSize=0.1, resets=False) - _generateScalar('scalar_TP_0.csv', numSequences=2, elementsPerSeq=5, + _generateScalar('scalar_TM_0.csv', numSequences=2, elementsPerSeq=5, numRepeats=20, stepSize=0.1, resets=False) - _generateScalar('scalar_TP_1.csv', numSequences=10, elementsPerSeq=5, + _generateScalar('scalar_TM_1.csv', numSequences=10, elementsPerSeq=5, numRepeats=20, stepSize=0.1, resets=False) diff --git a/examples/opf/experiments/classification/scalar_TP_0/description.py b/examples/opf/experiments/classification/scalar_TP_0/description.py index 4283a69347..76c2b98d7c 100644 --- a/examples/opf/experiments/classification/scalar_TP_0/description.py +++ b/examples/opf/experiments/classification/scalar_TP_0/description.py @@ -28,7 +28,7 @@ config = \ { 'claEvalClassification': True, 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/scalar_TP_0.csv'), + '../datasets/scalar_TM_0.csv'), 'modelParams': { 'clParams': { 'verbosity': 0}, 'sensorParams': { 'encoders': { }, 'verbosity': 0}, 'spParams': { }, diff 
--git a/examples/opf/experiments/classification/scalar_TP_1/description.py b/examples/opf/experiments/classification/scalar_TP_1/description.py index 560c1b58e3..9a036aee25 100644 --- a/examples/opf/experiments/classification/scalar_TP_1/description.py +++ b/examples/opf/experiments/classification/scalar_TP_1/description.py @@ -28,7 +28,7 @@ config = \ { 'claEvalClassification': True, 'dataSource': 'file://' + os.path.join(os.path.dirname(__file__), - '../datasets/scalar_TP_1.csv'), + '../datasets/scalar_TM_1.csv'), 'modelParams': { 'clParams': { 'verbosity': 0}, 'sensorParams': { 'encoders': { }, 'verbosity': 0}, 'spParams': { }, diff --git a/examples/opf/experiments/classification/scalar_encoder_0/UNDER_DEVELOPMENT b/examples/opf/experiments/classification/scalar_encoder_0/UNDER_DEVELOPMENT index 7b7a3ed2d1..49601e41aa 100644 --- a/examples/opf/experiments/classification/scalar_encoder_0/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/classification/scalar_encoder_0/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/missing_record/base/UNDER_DEVELOPMENT b/examples/opf/experiments/missing_record/base/UNDER_DEVELOPMENT index 1ec8c6a032..900ecda6d0 100644 --- a/examples/opf/experiments/missing_record/base/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/missing_record/base/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value.
- # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/missing_record/base/description.py b/examples/opf/experiments/missing_record/base/description.py index 6f34e5d9a5..78f4b271e7 100644 --- a/examples/opf/experiments/missing_record/base/description.py +++ b/examples/opf/experiments/missing_record/base/description.py @@ -179,7 +179,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -205,7 +205,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -219,20 +219,20 @@ 'boostStrength': 10.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : False, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -257,7 +257,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py.
'maxSynapsesPerSegment': 32, @@ -266,7 +266,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -302,7 +302,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/multistep/base/UNDER_DEVELOPMENT b/examples/opf/experiments/multistep/base/UNDER_DEVELOPMENT index 1ec8c6a032..900ecda6d0 100644 --- a/examples/opf/experiments/multistep/base/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/multistep/base/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/multistep/base/description.py b/examples/opf/experiments/multistep/base/description.py index 300f7e5ee2..d95075ff46 100644 --- a/examples/opf/experiments/multistep/base/description.py +++ b/examples/opf/experiments/multistep/base/description.py @@ -165,7 +165,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -191,7 +191,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value.
- # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -205,20 +205,20 @@ 'boostStrength': 0.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -243,7 +243,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -252,7 +252,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -288,7 +288,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/multistep/hotgym/description.py b/examples/opf/experiments/multistep/hotgym/description.py index 179cba6032..3c9582c9ff 100644 --- a/examples/opf/experiments/multistep/hotgym/description.py +++ b/examples/opf/experiments/multistep/hotgym/description.py @@ -173,7 +173,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -199,7 +199,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -213,20 +213,20 @@ 'boostStrength': 10.0, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -251,7 +251,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -260,7 +260,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -296,7 +296,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/opfrunexperiment_test/checkpoints/UNDER_DEVELOPMENT b/examples/opf/experiments/opfrunexperiment_test/checkpoints/UNDER_DEVELOPMENT index 1ec8c6a032..900ecda6d0 100644 --- a/examples/opf/experiments/opfrunexperiment_test/checkpoints/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/opfrunexperiment_test/checkpoints/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length.
This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py b/examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py index a6184ab462..c573c6bb4a 100644 --- a/examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py +++ b/examples/opf/experiments/opfrunexperiment_test/checkpoints/base.py @@ -173,7 +173,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -199,7 +199,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -208,20 +208,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -246,7 +246,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -255,7 +255,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -291,7 +291,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
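Taken together, the hunks above leave the description.py files in this patch with a temporal-memory block shaped like the trimmed sketch below. The keys, values, and comments are copied from the hunks themselves; this is an illustration of the renamed configuration, not a complete modelParams dict.

modelParams = {
    # Controls whether TM is enabled or disabled; TM is necessary for
    # making temporal predictions, such as predicting the next inputs.
    'tmEnable': True,

    'tmParams': {
        # TM diagnostic output verbosity control;
        # 0: silent; [1..6]: increasing levels of verbosity
        'verbosity': 0,

        # Number of cell columns in the cortical region (same number for
        # SP and TM)
        'columnCount': 2048,

        # > 0 for fixed-size CLA; -1 for non-fixed-size CLA
        'maxSynapsesPerSegment': 32,
        'maxSegmentsPerCell': 128,
    },
}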
diff --git a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym/description.py b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym/description.py index fc51d07671..d632e45913 100644 --- a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym/description.py +++ b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym/description.py @@ -151,7 +151,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -177,7 +177,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -186,20 +186,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -224,7 +224,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -233,7 +233,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -269,7 +269,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
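The tmEnable flag is not cosmetic: as the hunk in the classification base description earlier in this patch shows, the experiments zero out the classifier's cellsPerCol when TM is disabled, since with no TM there are no cells for the classifier to consume. A minimal runnable sketch of that guard (the config shape mirrors these description.py files; the starting value 32 is illustrative):

# With no TM there are no cells per column for the classifier.
config = {
    'modelParams': {
        'tmEnable': False,                # TM disabled
        'clParams': {'cellsPerCol': 32},  # illustrative starting value
    },
}

if not config['modelParams']['tmEnable']:
    config['modelParams']['clParams']['cellsPerCol'] = 0

assert config['modelParams']['clParams']['cellsPerCol'] == 0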
diff --git a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py index 83cd6a6b32..6bb213e6ea 100644 --- a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py +++ b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/description.py @@ -172,7 +172,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -198,7 +198,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -207,20 +207,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -245,7 +245,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -254,7 +254,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -290,7 +290,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
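pamLength, the "Pay Attention Mode" length described in the comments above, is also a direct constructor argument on the renamed classes. A hedged sketch of setting it on BacktrackingTM, using the same keyword names that appear in the renamed tests later in this patch (the numeric values are illustrative, not tuned recommendations):

from nupic.research.BacktrackingTM import BacktrackingTM

# Sketch only: keyword names mirror the renamed tests in this patch.
# A small pamLength favors short sequences; a large one favors long ones.
tm = BacktrackingTM(numberOfCols=2048,
                    cellsPerColumn=32,
                    initialPerm=0.21,
                    connectedPerm=0.5,
                    minThreshold=4,
                    newSynapseCount=7,
                    permanenceInc=0.1,
                    permanenceDec=0.1,
                    activationThreshold=12,
                    globalDecay=0,
                    burnIn=1,
                    pamLength=10)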
diff --git a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_no_agg/description.py b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_no_agg/description.py index 6490f18fd0..52d945a3c2 100644 --- a/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_no_agg/description.py +++ b/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_no_agg/description.py @@ -172,7 +172,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -198,7 +198,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -207,20 +207,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -245,7 +245,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -254,7 +254,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -290,7 +290,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
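The recurring maxSynapsesPerSegment and maxSegmentsPerCell comments distinguish fixed-size from non-fixed-size CLA models. The helper below is hypothetical (not part of NuPIC) and simply restates that convention as code:

def tmSizeParams(fixedSize):
    """Hypothetical helper: positive limits cap segment and synapse growth
    (fixed-size CLA); -1 leaves growth unbounded (non-fixed-size CLA)."""
    if fixedSize:
        return {'maxSynapsesPerSegment': 32, 'maxSegmentsPerCell': 128}
    return {'maxSynapsesPerSegment': -1, 'maxSegmentsPerCell': -1}

print(tmSizeParams(fixedSize=True))   # fixed-size limits as in the hunks above
print(tmSizeParams(fixedSize=False))  # unbounded growth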
diff --git a/examples/opf/experiments/spatial_classification/auto_generated/UNDER_DEVELOPMENT b/examples/opf/experiments/spatial_classification/auto_generated/UNDER_DEVELOPMENT index 7b7a3ed2d1..49601e41aa 100644 --- a/examples/opf/experiments/spatial_classification/auto_generated/UNDER_DEVELOPMENT +++ b/examples/opf/experiments/spatial_classification/auto_generated/UNDER_DEVELOPMENT @@ -156,7 +156,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -182,7 +182,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -191,20 +191,20 @@ config = { 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nta/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -274,7 +274,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/spatial_classification/base/description.py b/examples/opf/experiments/spatial_classification/base/description.py index fddf39055e..3f74396179 100644 --- a/examples/opf/experiments/spatial_classification/base/description.py +++ b/examples/opf/experiments/spatial_classification/base/description.py @@ -165,7 +165,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -191,7 +191,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -200,20 +200,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs.
Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : False, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -238,7 +238,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -247,7 +247,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -283,7 +283,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/examples/opf/experiments/template/base/descriptionTemplate.py b/examples/opf/experiments/template/base/descriptionTemplate.py index cfaa1eb4f9..2b3c3fdece 100644 --- a/examples/opf/experiments/template/base/descriptionTemplate.py +++ b/examples/opf/experiments/template/base/descriptionTemplate.py @@ -220,7 +220,7 @@ # General CLA Region Parameters ############################################################################## - # Number of cell columns in the cortical region (same number for SP and TP) + # Number of cell columns in the cortical region (same number for SP and TM) # (see also tpNCellsPerCol) # Replaces: spCoincCount 'claRegionNColumns' : 2048, @@ -252,12 +252,12 @@ ############################################################################## - # Temporal Pooler (TP) Parameters + # Temporal Memory (TM) Parameters ############################################################################## - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) # 'tpVerbosity' : 0, @@ -269,9 +269,9 @@ # by LPF; solve in OPF. 'tpTrainPrintStatsPeriodIter' : 0, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting the next - # inputs. Without TP, the model is only capable of reconstructing missing sensor + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting the next + # inputs. Without TM, the model is only capable of reconstructing missing sensor # inputs (via SP).
# 'tmEnable' : True, @@ -302,7 +302,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO for Ron: once the appropriate value is placed in TP constructor, see if + # TODO for Ron: once the appropriate value is placed in TM constructor, see if # we should eliminate this parameter from description.py # 'tpMaxSegmentsPerCell' : 128, @@ -324,7 +324,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO for Ron: once the appropriate value is placed in TP constructor, see if + # TODO for Ron: once the appropriate value is placed in TM constructor, see if # we should eliminate this parameter from description.py # 'tpMaxSynapsesPerSegment' : 32, diff --git a/examples/opf/simple_server/model_params.py b/examples/opf/simple_server/model_params.py index a4ff193312..e7a07a97e7 100644 --- a/examples/opf/simple_server/model_params.py +++ b/examples/opf/simple_server/model_params.py @@ -101,7 +101,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -130,20 +130,20 @@ 'synPermInactiveDec': 0.005, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -168,7 +168,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -177,7 +177,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -213,7 +213,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
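model_params.py files like the one above are normally consumed through the OPF model factory rather than read directly. A hedged sketch of that flow, assuming the post-rename factory import path and a scalar predicted field named 'consumption' (both are assumptions; the factory module location and field names vary across NuPIC releases and examples):

# Sketch only: import path and field name are assumptions, not part of
# this patch.
from nupic.frameworks.opf.model_factory import ModelFactory

from model_params import MODEL_PARAMS  # the dict edited in the hunk above

model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({'predictedField': 'consumption'})
result = model.run({'consumption': 21.2})  # one record in, one inference out
print(result.inferences)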
diff --git a/examples/prediction/experiments/confidenceTest/base/description.py b/examples/prediction/experiments/confidenceTest/base/description.py index 4b9fe78942..b58dcc50e9 100644 --- a/examples/prediction/experiments/confidenceTest/base/description.py +++ b/examples/prediction/experiments/confidenceTest/base/description.py @@ -81,7 +81,7 @@ spCoincCount = 200, spNumActivePerInhArea = 3, - # TP params + # TM params tpNCellsPerCol = 20, tpInitialPerm = 0.6, tpPermanenceInc = 0.1, @@ -265,7 +265,7 @@ def getDescription(datasets): printPeriodicStats = int(config['spPrintPeriodicStats']), - # TP params + # TM params tpSeed = 1, disableTemporal = 0 if config['trainTP'] else 1, temporalImp = config['temporalImp'], @@ -359,7 +359,7 @@ def getDescription(datasets): ) ) - # Testing the training set on both the TP and n-grams. + # Testing the training set on both the TM and n-grams. inferSteps.append( dict(name = 'confidenceTrain_nonoise', iterationCount = min(config['evalTrainingSetNumIterations'], diff --git a/examples/prediction/experiments/confidenceTest/firstOrder/description.py b/examples/prediction/experiments/confidenceTest/firstOrder/description.py index c665c38d4f..8526d4e37f 100644 --- a/examples/prediction/experiments/confidenceTest/firstOrder/description.py +++ b/examples/prediction/experiments/confidenceTest/firstOrder/description.py @@ -39,7 +39,7 @@ little to no effect on the prediction accuracy. When you run this dataset against 1st order n-grams, you get 52.6% accuracy, -so we would expect roughly the same accuracy using the TP: +so we would expect roughly the same accuracy using the TM: inputPredScore_burnIn1 : 0.526 diff --git a/examples/prediction/experiments/dutyCycle/base/description.py b/examples/prediction/experiments/dutyCycle/base/description.py index 76c4c6f1b1..a211b77f81 100644 --- a/examples/prediction/experiments/dutyCycle/base/description.py +++ b/examples/prediction/experiments/dutyCycle/base/description.py @@ -280,7 +280,7 @@ def getDescription(datasets): spSeed = 1, printPeriodicStats = int(config['spPeriodicStats']), - # TP params + # TM params disableTemporal = 1, # General params diff --git a/examples/tp/README.md b/examples/tm/README.md similarity index 84% rename from examples/tp/README.md rename to examples/tm/README.md index e8880d78b2..97a53874b3 100644 --- a/examples/tp/README.md +++ b/examples/tm/README.md @@ -2,7 +2,7 @@ Temporal Pooler Sample Code ===== This directory contains a number of files that demonstrate how to use the -temporal pooler directly. Most of the files are currently implemented as tests +temporal memory directly. Most of the files are currently implemented as tests that test (and illustrate) various interesting properties of the temporal pooler. @@ -14,6 +14,6 @@ learning first order and high order sequences. You can run each file by invoking python on the file, as in "python tp_test.py" WARNING: understanding these files requires building up a very detailed -knowledge of how the temporal pooler works in CLA's. The documentation is not +knowledge of how the temporal memory works in CLA's. The documentation is not great at this level of detail - any suggestions or help appreciated! diff --git a/examples/tp/hello_tm.py b/examples/tm/hello_tm.py similarity index 95% rename from examples/tp/hello_tm.py rename to examples/tm/hello_tm.py index e8fb23a5e6..a2b0f1796a 100755 --- a/examples/tp/hello_tm.py +++ b/examples/tm/hello_tm.py @@ -26,7 +26,7 @@ inspect the state. 
The code here runs a very simple version of sequence learning, with one -cell per column. The TP is trained with the simple sequence A->B->C->D->E +cell per column. The TM is trained with the simple sequence A->B->C->D->E HOMEWORK: once you have understood exactly what is going on here, try changing cellsPerColumn to 4. What is the difference between once cell per column and 4 @@ -72,7 +72,7 @@ def formatRow(x): ) -# Step 2: create input vectors to feed to the temporal pooler. Each input vector +# Step 2: create input vectors to feed to the temporal memory. Each input vector # must be numberOfCols wide. Here we create a simple sequence of 5 vectors # representing the sequence A -> B -> C -> D -> E x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32") @@ -103,9 +103,9 @@ def formatRow(x): print("winner cells " + str(tm.getWinnerCells())) print("# of active segments " + str(tm.connections.numSegments())) - # The reset command tells the TP that a sequence just ended and essentially + # The reset command tells the TM that a sequence just ended and essentially # zeros out all the states. It is not strictly necessary but it's a bit - # messier without resets, and the TP learns quicker with resets. + # messier without resets, and the TM learns quicker with resets. tm.reset() diff --git a/examples/tp/tp_constant_test.py b/examples/tm/tm_constant_test.py similarity index 55% rename from examples/tp/tp_constant_test.py rename to examples/tm/tm_constant_test.py index 6cedbf25d1..35e48174ca 100644 --- a/examples/tp/tp_constant_test.py +++ b/examples/tm/tm_constant_test.py @@ -28,8 +28,8 @@ import unittest2 as unittest from nupic.research import fdrutilities as fdrutils -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.support.unittesthelpers.testcasebase import (TestCaseBase, TestOptionParser) @@ -53,9 +53,9 @@ def _getSimplePatterns(numOnes, numPatterns): return p -def _createTps(numCols): - """Create two instances of temporal poolers (TP.py and TP10X2.py) with - identical parameter settings.""" +def _createTms(numCols): + """Create two temporal memory instances (BacktrackingTM.py + and BacktrackingTMCPP.py) with identical parameter settings.""" # Keep these fixed: minThreshold = 4 @@ -68,36 +68,44 @@ def _createTps(numCols): globalDecay = 0 cellsPerColumn = 1 - cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, - initialPerm=initialPerm, connectedPerm=connectedPerm, - minThreshold=minThreshold, newSynapseCount=newSynapseCount, - permanenceInc=permanenceInc, permanenceDec=permanenceDec, - activationThreshold=activationThreshold, - globalDecay=globalDecay, burnIn=1, - seed=SEED, verbosity=VERBOSITY, - checkSynapseConsistency=True, - pamLength=1000) + cppTm = BacktrackingTMCPP(numberOfCols=numCols, + cellsPerColumn=cellsPerColumn, + initialPerm=initialPerm, + connectedPerm=connectedPerm, + minThreshold=minThreshold, + newSynapseCount=newSynapseCount, + permanenceInc=permanenceInc, + permanenceDec=permanenceDec, + activationThreshold=activationThreshold, + globalDecay=globalDecay, burnIn=1, + seed=SEED, verbosity=VERBOSITY, + checkSynapseConsistency=True, + pamLength=1000) # Ensure we are copying over learning states for TPDiff - cppTp.retrieveLearningStates = True + cppTm.retrieveLearningStates = True - pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, - initialPerm=initialPerm, connectedPerm=connectedPerm,
- minThreshold=minThreshold, newSynapseCount=newSynapseCount, - permanenceInc=permanenceInc, permanenceDec=permanenceDec, - activationThreshold=activationThreshold, - globalDecay=globalDecay, burnIn=1, - seed=SEED, verbosity=VERBOSITY, - pamLength=1000) + pyTm = BacktrackingTM(numberOfCols=numCols, + cellsPerColumn=cellsPerColumn, + initialPerm=initialPerm, + connectedPerm=connectedPerm, + minThreshold=minThreshold, + newSynapseCount=newSynapseCount, + permanenceInc=permanenceInc, + permanenceDec=permanenceDec, + activationThreshold=activationThreshold, + globalDecay=globalDecay, burnIn=1, + seed=SEED, verbosity=VERBOSITY, + pamLength=1000) - return cppTp, pyTp + return cppTm, pyTm -class TPConstantTest(TestCaseBase): +class TMConstantTest(TestCaseBase): def setUp(self): - self.cppTp, self.pyTp = _createTps(100) + self.cppTm, self.pyTm = _createTms(100) - def _basicTest(self, tp=None): + def _basicTest(self, tm=None): """Test creation, pickling, and basic run of learning and inference.""" trainingSet = _getSimplePatterns(10, 10) @@ -106,44 +114,44 @@ def _basicTest(self, tp=None): for _ in range(2): for seq in trainingSet[0:5]: for _ in range(10): - tp.learn(seq) - tp.reset() + tm.learn(seq) + tm.reset() print "Learning completed" # Infer print "Running inference" - tp.collectStats = True + tm.collectStats = True for seq in trainingSet[0:5]: - tp.reset() - tp.resetStats() + tm.reset() + tm.resetStats() for _ in range(10): - tp.infer(seq) + tm.infer(seq) if VERBOSITY > 1 : print _printOneTrainingVector(seq) - tp.printStates(False, False) + tm.printStates(False, False) print print if VERBOSITY > 1: - print tp.getStats() + print tm.getStats() # Ensure our predictions are accurate for each sequence - self.assertGreater(tp.getStats()['predictionScoreAvg2'], 0.8) - print ("tp.getStats()['predictionScoreAvg2'] = ", - tp.getStats()['predictionScoreAvg2']) + self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8) + print ("tm.getStats()['predictionScoreAvg2'] = ", + tm.getStats()['predictionScoreAvg2']) - print "TPConstant basicTest ok" + print "TMConstantTest ok" - def testCppTpBasic(self): - self._basicTest(self.cppTp) + def testCppTmBasic(self): + self._basicTest(self.cppTm) - def testPyTpBasic(self): - self._basicTest(self.pyTp) + def testPyTmBasic(self): + self._basicTest(self.pyTm) - def testIdenticalTps(self): - self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp)) + def testIdenticalTms(self): + self.assertTrue(fdrutils.tmDiff2(self.cppTm, self.pyTm)) if __name__=="__main__": diff --git a/examples/tp/tm_high_order.py b/examples/tm/tm_high_order.py similarity index 100% rename from examples/tp/tm_high_order.py rename to examples/tm/tm_high_order.py diff --git a/examples/tp/tp_overlapping_sequences.py b/examples/tm/tm_overlapping_sequences.py similarity index 80% rename from examples/tp/tp_overlapping_sequences.py rename to examples/tm/tm_overlapping_sequences.py index d173f55517..51dd7d4c3a 100644 --- a/examples/tp/tp_overlapping_sequences.py +++ b/examples/tm/tm_overlapping_sequences.py @@ -26,8 +26,8 @@ import random import unittest2 as unittest -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils from nupic.support.unittesthelpers import testcasebase @@ -43,7 +43,7 @@ Test 2 - Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data. 
-Test 3 - Test with slow learning, some overlap in the patterns, and TP +Test 3 - Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2 @@ -58,8 +58,8 @@ # Whether to only run the short tests. SHORT = True -# If set to 0 the CPP TP will not be tested -INCLUDE_CPP_TP = 1 # Also test with CPP TP +# If set to 0 the CPP TM will not be tested +INCLUDE_CPP_TM = 1 # Also test with CPP TM @@ -126,7 +126,7 @@ def buildOverlappedSequences( numSequences = 2, seqLen: Overall length of each sequence sharedElements: Which element indices of each sequence are shared. These will be in the range between 0 and seqLen-1 - numOnBitsPerPattern: Number of ON bits in each TP input pattern + numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns @@ -193,7 +193,7 @@ def buildSequencePool(numSequences = 10, seqLen: List of possible sequence lengths numPatterns: How many possible patterns there are to use within sequences - numOnBitsPerPattern: Number of ON bits in each TP input pattern + numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns @@ -233,7 +233,7 @@ def buildSequencePool(numSequences = 10, -def createTPs(includeCPP = True, +def createTMs(includeCPP = True, includePy = True, numCols = 100, cellsPerCol = 4, @@ -251,118 +251,122 @@ def createTPs(includeCPP = True, **kwargs ): - """Create one or more TP instances, placing each into a dict keyed by + """Create one or more TM instances, placing each into a dict keyed by name. 
Parameters: ------------------------------------------------------------------ - retval: tps - dict of TP instances + retval: tms - dict of TM instances """ # Keep these fixed: connectedPerm = 0.5 - tps = dict() + tms = dict() if includeCPP: if VERBOSITY >= 2: - print "Creating TP10X2 instance" + print "Creating BacktrackingTMCPP instance" - cpp_tp = TP10X2(numberOfCols = numCols, cellsPerColumn = cellsPerCol, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, burnIn = 1, - seed=SEED, verbosity=VERBOSITY, - checkSynapseConsistency = checkSynapseConsistency, - collectStats = True, - pamLength = pamLength, - maxInfBacktrack = maxInfBacktrack, - maxLrnBacktrack = maxLrnBacktrack, - ) + cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol, + initialPerm = initialPerm, connectedPerm = connectedPerm, + minThreshold = minThreshold, newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, burnIn = 1, + seed=SEED, verbosity=VERBOSITY, + checkSynapseConsistency = checkSynapseConsistency, + collectStats = True, + pamLength = pamLength, + maxInfBacktrack = maxInfBacktrack, + maxLrnBacktrack = maxLrnBacktrack, + ) - # Ensure we are copying over learning states for TPDiff - cpp_tp.retrieveLearningStates = True + # Ensure we are copying over learning states for TMDiff + cpp_tm.retrieveLearningStates = True - tps['CPP'] = cpp_tp + tms['CPP'] = cpp_tm if includePy: if VERBOSITY >= 2: - print "Creating PY TP instance" + print "Creating PY TM instance" - py_tp = TP(numberOfCols = numCols, cellsPerColumn = cellsPerCol, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, burnIn = 1, - seed=SEED, verbosity=VERBOSITY, - collectStats = True, - pamLength = pamLength, - maxInfBacktrack = maxInfBacktrack, - maxLrnBacktrack = maxLrnBacktrack, - ) + py_tm = BacktrackingTM(numberOfCols = numCols, + cellsPerColumn = cellsPerCol, + initialPerm = initialPerm, + connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, burnIn = 1, + seed=SEED, verbosity=VERBOSITY, + collectStats = True, + pamLength = pamLength, + maxInfBacktrack = maxInfBacktrack, + maxLrnBacktrack = maxLrnBacktrack, + ) - tps['PY '] = py_tp + tms['PY '] = py_tm - return tps + return tms -def assertNoTPDiffs(tps): +def assertNoTMDiffs(tms): """ - Check for diffs among the TP instances in the passed in tps dict and + Check for diffs among the TM instances in the passed in tms dict and raise an assert if any are detected Parameters: --------------------------------------------------------------------- - tps: dict of TP instances + tms: dict of TM instances """ - if len(tps) == 1: + if len(tms) == 1: return - if len(tps) > 2: - raise "Not implemented for more than 2 TPs" + if len(tms) > 2: + raise RuntimeError("Not implemented for more than 2 TMs") - same = fdrutils.tpDiff2(tps.values(), verbosity=VERBOSITY) + same = 
fdrutils.tmDiff2(tms.values(), verbosity=VERBOSITY) assert(same) return -def evalSequences(tps, +def evalSequences(tms, trainingSequences, testSequences = None, nTrainRepetitions = 1, doResets = True, **kwargs): - """Train the TPs on the entire training set for nTrainRepetitions in a row. + """Train the TMs on the entire training set for nTrainRepetitions in a row. Then run the test set through inference once and return the inference stats. Parameters: --------------------------------------------------------------------- - tps: dict of TP instances + tms: dict of TM instances trainingSequences: list of training sequences. Each sequence is a list - of TP input patterns + of TM input patterns testSequences: list of test sequences. If None, we will test against the trainingSequences - nTrainRepetitions: Number of times to run the training set through the TP - doResets: If true, send a reset to the TP between each sequence + nTrainRepetitions: Number of times to run the training set through the TM + doResets: If true, send a reset to the TM between each sequence """ # If no test sequence is specified, use the first training sequence if testSequences == None: testSequences = trainingSequences - # First TP instance is used by default for verbose printing of input values, + # First TM instance is used by default for verbose printing of input values, # etc. - firstTP = tps.values()[0] + firstTM = tms.values()[0] - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # ===================================================================== # Loop through the training set nTrainRepetitions times @@ -372,10 +376,10 @@ def evalSequences(tps, print "\n##############################################################" print "################# Training round #%d of %d #################" \ % (trainingNum, nTrainRepetitions) - for (name,tp) in tps.iteritems(): - print "TP parameters for %s: " % (name) + for (name,tm) in tms.iteritems(): + print "TM parameters for %s: " % (name) print "---------------------" - tp.printParameters() + tm.printParameters() print # ====================================================================== @@ -389,8 +393,8 @@ def evalSequences(tps, % (sequenceNum, numSequences) if doResets: - for tp in tps.itervalues(): - tp.reset() + for tm in tms.itervalues(): + tm.reset() # -------------------------------------------------------------------- # Train each element of the sequence @@ -403,54 +407,54 @@ def evalSequences(tps, print "------------------------------------------------------------" print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \ % (sequenceNum, numSequences, t, numTimeSteps) - firstTP.printInput(x) + firstTM.printInput(x) print "input nzs:", x.nonzero() # Train in this element x = numpy.array(x).astype('float32') - for tp in tps.itervalues(): - tp.learn(x, computeInfOutput=True) + for tm in tms.itervalues(): + tm.learn(x, computeInfOutput=True) # Print the input and output states if VERBOSITY >= 3: - for (name,tp) in tps.iteritems(): - print "I/O states of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "I/O states of %s TM:" % (name) print "-------------------------------------", - tp.printStates(printPrevious = (VERBOSITY >= 5)) + tm.printStates(printPrevious = (VERBOSITY >= 5)) print - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # Print out number of columns that weren't predicted if VERBOSITY >= 2: - for (name,tp) in tps.iteritems(): - stats = tp.getStats() - print "# of unpredicted columns for %s TP: %d of %d" \ + for (name,tm) in 
tms.iteritems(): + stats = tm.getStats() + print "# of unpredicted columns for %s TM: %d of %d" \ % (name, stats['curMissing'], x.sum()) - numBurstingCols = tp.infActiveState['t'].min(axis=1).sum() - print "# of bursting columns for %s TP: %d of %d" \ + numBurstingCols = tm.infActiveState['t'].min(axis=1).sum() + print "# of bursting columns for %s TM: %d of %d" \ % (name, numBurstingCols, x.sum()) # Print the trained cells if VERBOSITY >= 4: print "Sequence %d finished." % (sequenceNum) - for (name,tp) in tps.iteritems(): - print "All cells of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "All cells of %s TM:" % (name) print "-------------------------------------", - tp.printCells() + tm.printCells() print # -------------------------------------------------------------------- # Done training all sequences in this round, print the total number of - # missing, extra columns and make sure it's the same among the TPs + # missing, extra columns and make sure it's the same among the TMs if VERBOSITY >= 2: print prevResult = None - for (name,tp) in tps.iteritems(): - stats = tp.getStats() + for (name,tm) in tms.iteritems(): + stats = tm.getStats() if VERBOSITY >= 1: - print "Stats for %s TP over all sequences for training round #%d of %d:" \ + print "Stats for %s TM over all sequences for training round #%d of %d:" \ % (name, trainingNum, nTrainRepetitions) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] @@ -461,7 +465,7 @@ def evalSequences(tps, assert (stats['totalMissing'] == prevResult[0]) assert (stats['totalExtra'] == prevResult[1]) - tp.resetStats() + tm.resetStats() # ===================================================================== @@ -469,21 +473,21 @@ def evalSequences(tps, if VERBOSITY >= 3: print "Calling trim segments" prevResult = None - for tp in tps.itervalues(): - nSegsRemoved, nSynsRemoved = tp.trimSegments() + for tm in tms.itervalues(): + nSegsRemoved, nSynsRemoved = tm.trimSegments() if prevResult is None: prevResult = (nSegsRemoved, nSynsRemoved) else: assert (nSegsRemoved == prevResult[0]) assert (nSynsRemoved == prevResult[1]) - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) if VERBOSITY >= 4: print "Training completed. 
Complete state:" - for (name,tp) in tps.iteritems(): + for (name,tm) in tms.iteritems(): print "%s:" % (name) - tp.printCells() + tm.printCells() print @@ -494,9 +498,9 @@ def evalSequences(tps, print "\n##############################################################" print "########################## Inference #########################" - # Reset stats in all TPs - for tp in tps.itervalues(): - tp.resetStats() + # Reset stats in all TMs + for tm in tms.itervalues(): + tm.resetStats() # ------------------------------------------------------------------- # Loop through the test sequences @@ -511,8 +515,8 @@ def evalSequences(tps, # Send in the rest if doResets: - for tp in tps.itervalues(): - tp.reset() + for tm in tms.itervalues(): + tm.reset() # ------------------------------------------------------------------- # Loop through the elements of this sequence @@ -525,39 +529,39 @@ def evalSequences(tps, print "------------------------------------------------------------" print "--------- sequence: #%d of %d, timeStep: #%d of %d -----------" \ % (sequenceNum, numSequences, t, numTimeSteps) - firstTP.printInput(x) + firstTM.printInput(x) print "input nzs:", x.nonzero() # Infer on this element - for tp in tps.itervalues(): - tp.infer(x) + for tm in tms.itervalues(): + tm.infer(x) - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # Print out number of columns that weren't predicted if VERBOSITY >= 2: - for (name,tp) in tps.iteritems(): - stats = tp.getStats() - print "# of unpredicted columns for %s TP: %d of %d" \ + for (name,tm) in tms.iteritems(): + stats = tm.getStats() + print "# of unpredicted columns for %s TM: %d of %d" \ % (name, stats['curMissing'], x.sum()) # Debug print of internal state if VERBOSITY >= 3: - for (name,tp) in tps.iteritems(): - print "I/O states of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "I/O states of %s TM:" % (name) print "-------------------------------------", - tp.printStates(printPrevious = (VERBOSITY >= 5), + tm.printStates(printPrevious = (VERBOSITY >= 5), printLearnState = False) print # Done with this sequence - # Debug print of all stats of the TPs + # Debug print of all stats of the TMs if VERBOSITY >= 4: print - for (name,tp) in tps.iteritems(): - print "Interim internal stats for %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "Interim internal stats for %s TM:" % (name) print "---------------------------------" - pprint.pprint(tp.getStats()) + pprint.pprint(tm.getStats()) print @@ -565,36 +569,36 @@ def evalSequences(tps, print "\n##############################################################" print "####################### Inference Done #######################" - # Get the overall stats for each TP and return them - tpStats = dict() - for (name,tp) in tps.iteritems(): - tpStats[name] = stats = tp.getStats() + # Get the overall stats for each TM and return them + tmStats = dict() + for (name,tm) in tms.iteritems(): + tmStats[name] = stats = tm.getStats() if VERBOSITY >= 2: - print "Stats for %s TP over all sequences:" % (name) + print "Stats for %s TM over all sequences:" % (name) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] - for (name,tp) in tps.iteritems(): + for (name,tm) in tms.iteritems(): if VERBOSITY >= 3: - print "\nAll internal stats for %s TP:" % (name) + print "\nAll internal stats for %s TM:" % (name) print "-------------------------------------", - pprint.pprint(tpStats[name]) + pprint.pprint(tmStats[name]) print - return tpStats + return tmStats def 
testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): """ - Build up a set of sequences, create the TP(s), train them, test them, + Build up a set of sequences, create the TM(s), train them, test them, and check that we got the expected number of missing predictions during inference. Parameters: ----------------------------------------------------------------------- baseParams: dict of all of the parameters for building sequences, - creating the TPs, and training and testing them. This + creating the TMs, and training and testing them. This gets updated from 'mods' before we use it. expMissingMin: Minimum number of expected missing predictions during testing. @@ -614,21 +618,21 @@ def testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): (numCols, trainingSequences) = func(**params) # -------------------------------------------------------------------- - # Create the TPs + # Create the TMs if params['numCols'] is None: params['numCols'] = numCols - tps = createTPs(**params) + tms = createTMs(**params) # -------------------------------------------------------------------- # Train and get test results - tpStats = evalSequences(tps = tps, + tmStats = evalSequences(tms= tms, trainingSequences=trainingSequences, testSequences=None, **params) # ----------------------------------------------------------------------- # Make sure there are the expected number of missing predictions - for (name, stats) in tpStats.iteritems(): + for (name, stats) in tmStats.iteritems(): print "Detected %d missing predictions overall during inference" \ % (stats['totalMissing']) if expMissingMin is not None and stats['totalMissing'] < expMissingMin: @@ -644,7 +648,7 @@ def testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): return True -class TPOverlappingSeqsTest(testcasebase.TestCaseBase): +class TMOverlappingSeqsTest(testcasebase.TestCaseBase): def testFastLearning(self): """ @@ -664,8 +668,8 @@ def testFastLearning(self): sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, @@ -714,8 +718,8 @@ def testSlowLearning(self): sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, @@ -749,7 +753,7 @@ def testSlowLearning(self): def testSlowLearningWithOverlap(self): """ - Test with slow learning, some overlap in the patterns, and TP thresholds + Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data. 
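For orientation before the next hunks, here is a minimal usage sketch of the renamed Python API that these tests drive. The import path, constructor keyword names, and the reset/learn/infer/resetStats/getStats calls all appear elsewhere in this patch; every numeric parameter value below is illustrative only, not taken from any particular test.

import numpy
from nupic.research.BacktrackingTM import BacktrackingTM

# Build a TM with hypothetical but plausible parameters (only the keyword
# names come from this patch; the values do not).
tm = BacktrackingTM(numberOfCols=100, cellsPerColumn=4,
                    initialPerm=0.2, connectedPerm=0.7,
                    minThreshold=12, newSynapseCount=16,
                    permanenceInc=0.1, permanenceDec=0.1,
                    activationThreshold=12, globalDecay=0.1,
                    burnIn=1, seed=42, collectStats=True,
                    pamLength=10)

# Five disjoint patterns of 15 on-bits each, as float32 (the dtype the
# tests feed to learn/infer).
sequence = [numpy.zeros(100, dtype='float32') for _ in range(5)]
for i, x in enumerate(sequence):
    x[i * 20:i * 20 + 15] = 1

for _ in range(10):          # training repetitions
    tm.reset()               # reset between presentations of the sequence
    for x in sequence:
        tm.learn(x, computeInfOutput=True)

tm.resetStats()
tm.reset()
for x in sequence:
    tm.infer(x)
print(tm.getStats()['totalMissing'])   # columns that went unpredicted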
@@ -771,8 +775,8 @@ def testSlowLearningWithOverlap(self): numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 2, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), @@ -829,8 +833,8 @@ def testForbesLikeData(self): numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 1, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), @@ -902,9 +906,9 @@ def testForbesLikeData(self): rgen = numpy.random.RandomState(SEED) random.seed(SEED) - if not INCLUDE_CPP_TP: + if not INCLUDE_CPP_TM: print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - print "!! WARNING: C++ TP testing is DISABLED until it can be updated." + print "!! WARNING: C++ TM testing is DISABLED until it can be updated." print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # Form the command line for the unit test framework. diff --git a/examples/tp/tp_segment_learning.py b/examples/tm/tm_segment_learning.py similarity index 72% rename from examples/tp/tp_segment_learning.py rename to examples/tm/tm_segment_learning.py index 4b5841997d..993b4335d8 100644 --- a/examples/tp/tp_segment_learning.py +++ b/examples/tm/tm_segment_learning.py @@ -25,7 +25,7 @@ Multi-attribute sequence tests. -SL1) Train the TP repeatedly using a single sequence plus noise. The sequence +SL1) Train the TM repeatedly using a single sequence plus noise. The sequence can be relatively short, say 5 patterns. Add random noise each time a pattern is presented. The noise should be different for each presentation and can be equal to the number of on bits in the pattern. @@ -35,7 +35,7 @@ will be in the left half of the input vector. The noise bits will be in the right half of the input vector. -After several iterations of each sequence, the TP should should achieve perfect +After several iterations of each sequence, the TM should achieve perfect inference on the true sequence. There should be resets between each presentation of the sequence. Check predictions in the sequence part only (it's ok to predict random bits in the right half of the column space), and test with clean @@ -56,13 +56,13 @@ import numpy import unittest2 as unittest -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils from nupic.support.unittesthelpers import testcasebase -g_testCPPTP = True +g_testCPPTM = True @@ -92,16 +92,16 @@ def _printAllTrainingSequences(self, trainingSequences): self._printOneTrainingVector(pattern) - def _setVerbosity(self, verbosity, tp, tpPy): - """Set verbosity level on the TP""" - tp.cells4.setVerbosity(verbosity) - tp.verbosity = verbosity - tpPy.verbosity = verbosity + def _setVerbosity(self, verbosity, tm, tmPy): + """Set verbosity level on the TM""" + tm.cells4.setVerbosity(verbosity) + tm.verbosity = verbosity + tmPy.verbosity = verbosity - def _createTPs(self, numCols, fixedResources=False, - checkSynapseConsistency = True): - """Create an instance of the appropriate temporal pooler. 
We isolate + def _createTMs(self, numCols, fixedResources=False, + checkSynapseConsistency = True): + """Create an instance of the appropriate temporal memory. We isolate all parameters as constants specified here.""" # Keep these fixed: @@ -127,45 +127,49 @@ def _createTPs(self, numCols, fixedResources=False, maxAge = 1 - if g_testCPPTP: + if g_testCPPTM: if g_options.verbosity > 1: - print "Creating TP10X2 instance" - - cppTP = TP10X2(numberOfCols = numCols, cellsPerColumn = 4, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, - newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, - permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, - seed=g_options.seed, verbosity=g_options.verbosity, - checkSynapseConsistency = checkSynapseConsistency, - pamLength = 1000, - maxSegmentsPerCell = maxSegmentsPerCell, - maxSynapsesPerSegment = maxSynapsesPerSegment, - ) - # Ensure we are copying over learning states for TPDiff - cppTP.retrieveLearningStates = True + print "Creating BacktrackingTMCPP instance" + + cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = 4, + initialPerm = initialPerm, connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, + seed=g_options.seed, verbosity=g_options.verbosity, + checkSynapseConsistency = checkSynapseConsistency, + pamLength = 1000, + maxSegmentsPerCell = maxSegmentsPerCell, + maxSynapsesPerSegment = maxSynapsesPerSegment, + ) + # Ensure we are copying over learning states for TMDiff + cppTM.retrieveLearningStates = True else: - cppTP = None + cppTM = None if g_options.verbosity > 1: - print "Creating PY TP instance" - pyTP = TP(numberOfCols = numCols, cellsPerColumn = 4, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, - seed=g_options.seed, verbosity=g_options.verbosity, - pamLength = 1000, - maxSegmentsPerCell = maxSegmentsPerCell, - maxSynapsesPerSegment = maxSynapsesPerSegment, - ) - - return cppTP, pyTP + print "Creating PY TM instance" + pyTM = BacktrackingTM(numberOfCols = numCols, + cellsPerColumn = 4, + initialPerm = initialPerm, + connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, + seed=g_options.seed, verbosity=g_options.verbosity, + pamLength = 1000, + maxSegmentsPerCell = maxSegmentsPerCell, + maxSynapsesPerSegment = maxSynapsesPerSegment, + ) + + return cppTM, pyTM def _getSimplePatterns(self, numOnes, numPatterns): @@ -303,12 +307,12 @@ def _buildSL2TrainingSet(self, numOnes=10, numRepetitions= 10): return (trainingSequences, testSequences) - def _testSegmentLearningSequence(self, tps, - trainingSequences, - testSequences, - doResets = True): + def _testSegmentLearningSequence(self, tms, + trainingSequences, + testSequences, + doResets = True): - """Train the given TP once on the entire training set. 
on the Test a single + """Train the given TM once on the entire training set. Then test on a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code. Success code is 1 for pass, 0 for fail.""" @@ -317,21 +321,21 @@ def _testSegmentLearningSequence(self, tps, if testSequences == None: testSequences = trainingSequences - cppTP, pyTP = tps[0], tps[1] + cppTM, pyTM = tms[0], tms[1] - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True #-------------------------------------------------------------------------- # Learn if g_options.verbosity > 0: print "============= Training =================" - print "TP parameters:" + print "TM parameters:" print "CPP" - if cppTP is not None: - print cppTP.printParameters() + if cppTM is not None: + print cppTM.printParameters() print "\nPY" - print pyTP.printParameters() + print pyTM.printParameters() for sequenceNum, trainingSequence in enumerate(trainingSequences): @@ -339,76 +343,76 @@ def _testSegmentLearningSequence(self, tps, print "============= New sequence =================" if doResets: - if cppTP is not None: - cppTP.reset() - pyTP.reset() + if cppTM is not None: + cppTM.reset() + pyTM.reset() for t, x in enumerate(trainingSequence): if g_options.verbosity > 1: print "Time step", t, "sequence number", sequenceNum - print "Input: ", pyTP.printInput(x) + print "Input: ", pyTM.printInput(x) print "NNZ:", x.nonzero() x = numpy.array(x).astype('float32') - if cppTP is not None: - cppTP.learn(x) - pyTP.learn(x) + if cppTM is not None: + cppTM.learn(x) + pyTM.learn(x) - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity, + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity, relaxSegmentTests = False) == True if g_options.verbosity > 2: - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printStates(printPrevious = (g_options.verbosity > 4)) + cppTM.printStates(printPrevious = (g_options.verbosity > 4)) print "\nPY" - pyTP.printStates(printPrevious = (g_options.verbosity > 4)) + pyTM.printStates(printPrevious = (g_options.verbosity > 4)) print if g_options.verbosity > 4: print "Sequence finished. Complete state after sequence" - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printCells() + cppTM.printCells() print "\nPY" - pyTP.printCells() + pyTM.printCells() print if g_options.verbosity > 2: print "Calling trim segments" - if cppTP is not None: - nSegsRemovedCPP, nSynsRemovedCPP = cppTP.trimSegments() - nSegsRemoved, nSynsRemoved = pyTP.trimSegments() - if cppTP is not None: + if cppTM is not None: + nSegsRemovedCPP, nSynsRemovedCPP = cppTM.trimSegments() + nSegsRemoved, nSynsRemoved = pyTM.trimSegments() + if cppTM is not None: assert nSegsRemovedCPP == nSegsRemoved assert nSynsRemovedCPP == nSynsRemoved - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True print "Training completed. 
Stats:" - info = pyTP.getSegmentInfo() + info = pyTM.getSegmentInfo() print " nSegments:", info[0] print " nSynapses:", info[1] if g_options.verbosity > 3: print "Complete state:" - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printCells() + cppTM.printCells() print "\nPY" - pyTP.printCells() + pyTM.printCells() #--------------------------------------------------------------------------- # Infer if g_options.verbosity > 1: print "============= Inference =================" - if cppTP is not None: - cppTP.collectStats = True - pyTP.collectStats = True + if cppTM is not None: + cppTM.collectStats = True + pyTM.collectStats = True nPredictions = 0 cppNumCorrect, pyNumCorrect = 0, 0 @@ -421,58 +425,58 @@ def _testSegmentLearningSequence(self, tps, slen = len(testSequence) if doResets: - if cppTP is not None: - cppTP.reset() - pyTP.reset() + if cppTM is not None: + cppTM.reset() + pyTM.reset() for t, x in enumerate(testSequence): if g_options.verbosity >= 2: print "Time step", t, '\nInput:' - pyTP.printInput(x) + pyTM.printInput(x) - if cppTP is not None: - cppTP.infer(x) - pyTP.infer(x) + if cppTM is not None: + cppTM.infer(x) + pyTM.infer(x) - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True if g_options.verbosity > 2: - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printStates(printPrevious = (g_options.verbosity > 4), + cppTM.printStates(printPrevious = (g_options.verbosity > 4), printLearnState = False) print "\nPY" - pyTP.printStates(printPrevious = (g_options.verbosity > 4), + pyTM.printStates(printPrevious = (g_options.verbosity > 4), printLearnState = False) - if cppTP is not None: - cppScores = cppTP.getStats() - pyScores = pyTP.getStats() + if cppTM is not None: + cppScores = cppTM.getStats() + pyScores = pyTM.getStats() if g_options.verbosity >= 2: - if cppTP is not None: + if cppTM is not None: print "CPP" print cppScores print "\nPY" print pyScores - if t < slen-1 and t > pyTP.burnIn: + if t < slen-1 and t > pyTM.burnIn: nPredictions += 1 - if cppTP is not None: + if cppTM is not None: if cppScores['curPredictionScore2'] > 0.3: cppNumCorrect += 1 if pyScores['curPredictionScore2'] > 0.3: pyNumCorrect += 1 # Check that every inference was correct, excluding the very last inference - if cppTP is not None: - cppScores = cppTP.getStats() - pyScores = pyTP.getStats() + if cppTM is not None: + cppScores = cppTM.getStats() + pyScores = pyTM.getStats() passTest = False - if cppTP is not None: + if cppTM is not None: if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions: passTest = True else: @@ -502,10 +506,10 @@ def _testSL1(self, numOnes = 10, numRepetitions = 6, fixedResources = False, numRepetitions) numCols = len(trainingSet[0][0]) - tps = self._createTPs(numCols = numCols, fixedResources=fixedResources, - checkSynapseConsistency = checkSynapseConsistency) + tms = self._createTMs(numCols = numCols, fixedResources=fixedResources, + checkSynapseConsistency = checkSynapseConsistency) - testResult = self._testSegmentLearningSequence(tps, trainingSet, testSet) + testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet) if testResult: print "%s PASS" % testName @@ -529,10 +533,10 @@ def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False, trainingSet, testSet = self._buildSL2TrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) - tps = 
self._createTPs(numCols = numCols, fixedResources=fixedResources, - checkSynapseConsistency = checkSynapseConsistency) + tms = self._createTMs(numCols = numCols, fixedResources=fixedResources, + checkSynapseConsistency = checkSynapseConsistency) - testResult = self._testSegmentLearningSequence(tps, trainingSet, testSet) + testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet) if testResult: print "%s PASS" % testName @@ -543,7 +547,7 @@ def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False, -class TPSegmentLearningTests(ExperimentTestBaseClass): +class TMSegmentLearningTests(ExperimentTestBaseClass): """Our high level tests""" diff --git a/examples/tp/tp_test.py b/examples/tm/tm_test.py similarity index 90% rename from examples/tp/tp_test.py rename to examples/tm/tm_test.py index 4fad9fc5f4..6cf64bc877 100755 --- a/examples/tp/tp_test.py +++ b/examples/tm/tm_test.py @@ -20,12 +20,12 @@ # ---------------------------------------------------------------------- """ -This file performs a variety of tests on the reference temporal pooler code. +This file performs a variety of tests on the reference temporal memory code. basic_test ========== -Tests creation and serialization of the TP class. Sets parameters and ensures +Tests creation and serialization of the TM class. Sets parameters and ensures they are the same after a serialization and de-serialization step. Runs learning and inference on a small number of random patterns and ensures it doesn't crash. @@ -65,7 +65,7 @@ constructed so that consecutive patterns within a sequence don't share any columns. -Training: The TP is trained with P passes of the M sequences. There +Training: The TM is trained with P passes of the M sequences. There should be a reset between sequences. The total number of iterations during training is P*N*M. @@ -88,8 +88,8 @@ B4) N=100, M=3, P=1 (See how high we can go with N*M) -B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead of -between 21 and 25), and set activationThreshold to newSynapseCount. +B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead +of between 21 and 25), and set activationThreshold to newSynapseCount. B6) Like B1 but with cellsPerCol = 4. First order sequences should still work just fine. @@ -102,7 +102,7 @@ connectedPerm = 0.7 permanenceInc = 0.2 -Now we train the TP with the B1 sequence 4 times (P=4). This will increment +Now we train the TM with the B1 sequence 4 times (P=4). This will increment the permanences to be above 0.8 and at that point the inference will be correct. This test will ensure the basic match function and segment activation rules are working correctly. @@ -232,14 +232,14 @@ Note: for pooling tests the density of input patterns should be pretty low since each pooling step increases the output density. At the same time, we need -enough bits on in the input for the temporal pooler to find enough synapses. So, +enough bits on in the input for the temporal memory to find enough synapses. So, for the tests, constraints should be something like: (Input Density) * (Number of pooling steps) < 25 %. AND sum(Input) > newSynapseCount*1.5 -Training: The TP is trained with P passes of the M sequences. There +Training: The TM is trained with P passes of the M sequences. There should be a reset between sequences. The total number of iterations during training is P*N*M. @@ -250,14 +250,14 @@ with no extra columns. 
We report the number of columns that are incorrect and report a failure if more than 2 columns are incorrectly predicted. -P1) Train the TP two times (P=2) on a single long sequence consisting of random +P1) Train the TM two times (P=2) on a single long sequence consisting of random patterns (N=20, M=1). There should be no overlapping columns between successive -patterns. During inference, the TP should be able reliably predict the pattern +patterns. During inference, the TM should be able to reliably predict the pattern two time steps in advance. numCols should be about 350 to meet the above constraints and also to maintain consistency with test P2. -P2) Increase TP rate to 3 time steps in advance (P=3). At each step during -inference, the TP should be able to reliably predict the pattern coming up at +P2) Increase TM rate to 3 time steps in advance (P=3). At each step during +inference, the TM should be able to reliably predict the pattern coming up at t+1, t+2, and t+3.. P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost @@ -283,22 +283,24 @@ HiLo Tests ========== -A high order sequence memory like the TP can memorize very long sequences. In +A high order sequence memory like the TM can memorize very long sequences. In many applications though you don't want to memorize. You see a long sequence of patterns but there are actually lower order repeating sequences embedded within -it. A simplistic example is words in a sentence. Words such as You'd like the TP to learn those sequences. +it. A simplistic example is words in a sentence. You'd like the +TM to learn those sequences. Tests should capture number of synapses learned and compare against theoretically optimal numbers to pass/fail. -HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence presented -with noise before and after, with no resets. Two steps of noise will be presented. +HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence +presented with noise before and after, with no resets. Two steps of noise will +be presented. The noise will be 20 patterns, presented in random order. Every pattern has a consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern shares any columns with the others. These sequences are easy to visualize and is very useful for debugging. -TP parameters should be the same as B7 except that permanenceDec should be 0.05: +TM parameters should be the same as B7 except that permanenceDec should be 0.05: activationThreshold = newSynapseCount minThreshold = activationThreshold @@ -310,22 +312,23 @@ So, this means it should learn a sequence after 4 repetitions. It will take 4 orphan decay steps to get an incorrect synapse to go away completely. -HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence that -builds on the 3-sequence. For example, if learning A-B-C we train also on +HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence +that builds on the 3-sequence. For example, if learning A-B-C we train also on D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this -test is disabled in the code. It is a bit tricky to test this. When you present DAB, -you should predict the same columns as when you present AB (i.e. in both cases -C should be predicted). However, the representation for C in DABC should be -different than the representation for C in ABC. 
Furthermore, when you present -AB, the representation for C should be an OR of the representation in DABC and ABC -since you could also be starting in the middle of the DABC sequence. All this is -actually happening in the code, but verified by visual inspection only. +test is disabled in the code. It is a bit tricky to test this. When you present +DAB, you should predict the same columns as when you present AB (i.e. in both +cases C should be predicted). However, the representation for C in DABC should +be different than the representation for C in ABC. Furthermore, when you present +AB, the representation for C should be an OR of the representation in DABC and +ABC since you could also be starting in the middle of the DABC sequence. All +this is actually happening in the code, but verified by visual inspection only. HL1) Noise + sequence + noise + sequence repeatedly without resets until it has -learned that sequence. Train the TP repeatedly with N random sequences that all +learned that sequence. Train the TM repeatedly with N random sequences that all share a single subsequence. Each random sequence can be 10 patterns long, sharing a subsequence that is 5 patterns long. There should be no resets -between presentations. Inference should then be on that 5 long shared subsequence. +between presentations. Inference should then be on that 5 long shared +subsequence. Example (3-long shared subsequence): @@ -334,10 +337,10 @@ R S T D E F U V W X Y Z 1 D E F 2 3 4 5 -TP parameters should be the same as HL0. +TM parameters should be the same as HL0. -HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should learn -ABC is separate from DABC. +HL2) Like HL1, but after A B C has been learned, try to learn D A B C. It should +learn ABC is separate from DABC. HL3) Like HL2, but test with resets. @@ -365,7 +368,7 @@ Sequence Likelihood Tests ========================= -These tests are in the file TPLikelihood.py +These tests are in the file TMLikelihood.py Segment Learning Tests [UNIMPLEMENTED] @@ -373,12 +376,12 @@ Multi-attribute sequence tests. -SL1) Train the TP repeatedly using a single (multiple) sequence plus noise. The +SL1) Train the TM repeatedly using a single (multiple) sequence plus noise. The sequence can be relatively short, say 20 patterns. No two consecutive patterns in the sequence should share columns. Add random noise each time a pattern is presented. The noise should be different for each presentation and can be equal to the number of on bits in the pattern. After N iterations of the noisy -sequences, the TP should should achieve perfect inference on the true sequence. +sequences, the TM should achieve perfect inference on the true sequence. There should be resets between each presentation of the sequence. Check predictions in the sequence only. And test with clean sequences. @@ -412,7 +415,7 @@ Capacity Tests [UNIMPLEMENTED] ============== -These are stress tests that verify that the temporal pooler can learn a large +These are stress tests that verify that the temporal memory can learn a large number of sequences and can predict a large number of possible next steps. Some research needs to be done first to understand the capacity of the system as it relates to the number of columns, cells per column, etc. @@ -424,8 +427,8 @@ Online Learning Tests [UNIMPLEMENTED] ===================== -These tests will verify that the temporal pooler continues to work even if -sequence statistics (and the actual sequences) change slowly over time. 
The TP +These tests will verify that the temporal memory continues to work even if +sequence statistics (and the actual sequences) change slowly over time. The TM should adapt to the changes and learn to recognize newer sequences (and forget the older sequences?). @@ -441,22 +444,22 @@ import cPickle import pprint -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils -#--------------------------------------------------------------------------------- -TEST_CPP_TP = 1 # temporarily disabled until it can be updated +#------------------------------------------------------------------------------- +TEST_CPP_TM = 1 # temporarily disabled until it can be updated VERBOSITY = 0 # how chatty the unit tests should be SEED = 33 # the random seed used throughout -TPClass = TP +TMClass = BacktrackingTM checkSynapseConsistency = False rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random -#--------------------------------------------------------------------------------- +#------------------------------------------------------------------------------- # Helper routines -#-------------------------------------------------------------------------------- +#------------------------------------------------------------------------------- def printOneTrainingVector(x): @@ -523,7 +526,8 @@ def buildTrainingSet(numSequences = 2, every sequence. If sequenceLength is 100 and pctShared is 0.2, then a subsequence consisting of 20 patterns will be in every sequence. Can also be the keyword - 'one pattern', in which case a single time step is shared. + 'one pattern', in which case a single time step is + shared. seqGenMode: What kind of sequence to generate. If contains 'shared' generates shared subsequence. If contains 'no shared', does not generate any shared subsequence. If contains @@ -537,7 +541,8 @@ def buildTrainingSet(numSequences = 2, disjointConsecutive: Whether to generate disjoint consecutive patterns or not. 
""" - # Calculate the set of column indexes once to be used in each call to generatePattern() + # Calculate the set of column indexes once to be used in each call to + # generatePattern() colSet = set(range(numCols)) if 'beginning' in seqGenMode: @@ -546,7 +551,7 @@ def buildTrainingSet(numSequences = 2, if 'no shared' in seqGenMode or numSequences == 1: pctShared = 0.0 - #-------------------------------------------------------------------------------- + #----------------------------------------------------------------------------- # Build shared subsequence if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode: sharedSequenceLength = int(pctShared*sequenceLength) @@ -561,12 +566,13 @@ def buildTrainingSet(numSequences = 2, for i in xrange(sharedSequenceLength): if disjointConsecutive and i > 0: - x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-1]) + x = generatePattern(numCols, minOnes, maxOnes, colSet, + sharedSequence[i-1]) else: x = generatePattern(numCols, minOnes, maxOnes, colSet) sharedSequence.append(x) - #-------------------------------------------------------------------------------- + #----------------------------------------------------------------------------- # Build random training set, splicing in the shared subsequence trainingSequences = [] @@ -747,9 +753,9 @@ def buildHL0bTrainingSet(numOnes=5): # Basic test (creation, pickling, basic run of learning and inference) def basicTest(): - global TPClass, SEED, VERBOSITY, checkSynapseConsistency + global TMClass, SEED, VERBOSITY, checkSynapseConsistency #-------------------------------------------------------------------------------- - # Create TP object + # Create TM object numberOfCols =10 cellsPerColumn =3 initialPerm =.2 @@ -766,39 +772,39 @@ def basicTest(): seed =SEED verbosity =VERBOSITY - tp = TPClass(numberOfCols, cellsPerColumn, - initialPerm, connectedPerm, - minThreshold, newSynapseCount, - permanenceInc, permanenceDec, permanenceMax, - globalDecay, activationThreshold, - doPooling, segUpdateValidDuration, - seed=seed, verbosity=verbosity, - pamLength = 1000, - checkSynapseConsistency=checkSynapseConsistency) + tm = TMClass(numberOfCols, cellsPerColumn, + initialPerm, connectedPerm, + minThreshold, newSynapseCount, + permanenceInc, permanenceDec, permanenceMax, + globalDecay, activationThreshold, + doPooling, segUpdateValidDuration, + seed=seed, verbosity=verbosity, + pamLength = 1000, + checkSynapseConsistency=checkSynapseConsistency) print "Creation ok" #-------------------------------------------------------------------------------- # Save and reload - pickle.dump(tp, open("test_tp.pkl", "wb")) - tp2 = pickle.load(open("test_tp.pkl")) - - assert tp2.numberOfCols == numberOfCols - assert tp2.cellsPerColumn == cellsPerColumn - print tp2.initialPerm - assert tp2.initialPerm == numpy.float32(.2) - assert tp2.connectedPerm == numpy.float32(.8) - assert tp2.minThreshold == minThreshold - assert tp2.newSynapseCount == newSynapseCount - assert tp2.permanenceInc == numpy.float32(.1) - assert tp2.permanenceDec == numpy.float32(.05) - assert tp2.permanenceMax == 1 - assert tp2.globalDecay == numpy.float32(.05) - assert tp2.activationThreshold == activationThreshold - assert tp2.doPooling == doPooling - assert tp2.segUpdateValidDuration == segUpdateValidDuration - assert tp2.seed == SEED - assert tp2.verbosity == verbosity + pickle.dump(tm, open("test_tm.pkl", "wb")) + tm2 = pickle.load(open("test_tm.pkl")) + + assert tm2.numberOfCols == numberOfCols + assert tm2.cellsPerColumn == 
cellsPerColumn + print tm2.initialPerm + assert tm2.initialPerm == numpy.float32(.2) + assert tm2.connectedPerm == numpy.float32(.8) + assert tm2.minThreshold == minThreshold + assert tm2.newSynapseCount == newSynapseCount + assert tm2.permanenceInc == numpy.float32(.1) + assert tm2.permanenceDec == numpy.float32(.05) + assert tm2.permanenceMax == 1 + assert tm2.globalDecay == numpy.float32(.05) + assert tm2.activationThreshold == activationThreshold + assert tm2.doPooling == doPooling + assert tm2.segUpdateValidDuration == segUpdateValidDuration + assert tm2.seed == SEED + assert tm2.verbosity == verbosity print "Save/load ok" @@ -807,7 +813,7 @@ def basicTest(): for i in xrange(5): xi = rgen.randint(0,2,(numberOfCols)) x = numpy.array(xi, dtype="uint32") - y = tp.learn(x) + y = tm.learn(x) #-------------------------------------------------------------------------------- # Infer @@ -815,20 +821,20 @@ def basicTest(): for i in xrange(10): xi = rgen.randint(0,2,(numberOfCols)) x = numpy.array(xi, dtype="uint32") - y = tp.infer(x) + y = tm.infer(x) if i > 0: - p = tp.checkPrediction2([pattern.nonzero()[0] for pattern in patterns]) + p = tm.checkPrediction2([pattern.nonzero()[0] for pattern in patterns]) print "basicTest ok" #--------------------------------------------------------------------------------- # Figure out acceptable patterns if none were passed to us. -def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable = 1): +def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = 1): """ Tries to infer the set of acceptable patterns for prediction at the given time step and for the give sequence. Acceptable patterns are: the current one, - plus a certain number of patterns after timeStep, in the sequence that the TP + plus a certain number of patterns after timeStep, in the sequence that the TM is currently tracking. Any other pattern is not acceptable. TODO: @@ -838,7 +844,7 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable Parameters: ========== - tp the whole TP, so that we can look at its parameters + tm the whole TM, so that we can look at its parameters t the current time step whichSequence the sequence we are currently tracking trainingSequences all the training sequences @@ -846,7 +852,7 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable we are willing to consider acceptable. In the case of pooling, it is less than or equal to the min of the number of training reps and the segUpdateValidDuration - parameter of the TP, depending on the test case. + parameter of the TM, depending on the test case. The default value is 1, because by default, the pattern after the current one should always be predictable. 
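Before the next hunk, a small sketch may help: it restates the acceptable-window arithmetic that findAcceptablePatterns applies. The upTo formula and the doPooling/segUpdateValidDuration attributes mirror the hunk that follows; the function name and the concrete numbers here are hypothetical.

def acceptableWindowEnd(t, doPooling, segUpdateValidDuration, nAcceptable=1):
    # Always accept the current pattern and the next one.
    upTo = t + 2
    # Pooling lets the TM predict further ahead, bounded by both the
    # segment-update window and the caller-supplied nAcceptable.
    if doPooling:
        upTo += min(segUpdateValidDuration, nAcceptable)
    return upTo

print(acceptableWindowEnd(t=3, doPooling=False, segUpdateValidDuration=2))  # 5
print(acceptableWindowEnd(t=3, doPooling=True, segUpdateValidDuration=2))   # 6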
@@ -859,9 +865,9 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable # Determine how many steps forward we want to see in the prediction upTo = t + 2 # always predict current and next - # If the TP is pooling, more steps can be predicted - if tp.doPooling: - upTo += min(tp.segUpdateValidDuration, nAcceptable) + # If the TM is pooling, more steps can be predicted + if tm.doPooling: + upTo += min(tm.segUpdateValidDuration, nAcceptable) assert upTo <= len(trainingSequences[whichSequence]) @@ -917,7 +923,7 @@ def testSequence(trainingSequences, prediction failures, the number of errors, and the number of perfect predictions""" - global TP, SEED, checkSynapseConsistency, VERBOSITY + global BacktrackingTM, SEED, checkSynapseConsistency, VERBOSITY numPerfect = 0 # When every column is correct in the prediction numStrictErrors = 0 # When at least one column is incorrect @@ -929,7 +935,7 @@ def testSequence(trainingSequences, # override default maxSeqLEngth value for high-order sequences if highOrder: - tp = TPClass(numberOfCols, cellsPerColumn, + tm = TMClass(numberOfCols, cellsPerColumn, initialPerm, connectedPerm, minThreshold, newSynapseCount, permanenceInc, permanenceDec, permanenceMax, @@ -941,7 +947,7 @@ def testSequence(trainingSequences, maxSeqLength=0 ) else: - tp = TPClass(numberOfCols, cellsPerColumn, + tm = TMClass(numberOfCols, cellsPerColumn, initialPerm, connectedPerm, minThreshold, newSynapseCount, permanenceInc, permanenceDec, permanenceMax, @@ -955,26 +961,26 @@ def testSequence(trainingSequences, if compareToPy: # override default maxSeqLEngth value for high-order sequences if highOrder: - py_tp = TP(numberOfCols, cellsPerColumn, - initialPerm, connectedPerm, - minThreshold, newSynapseCount, - permanenceInc, permanenceDec, permanenceMax, - globalDecay, activationThreshold, - doPooling, segUpdateValidDuration, - seed=SEED, verbosity=verbosity, - pamLength=pamLength, - maxSeqLength=0 - ) + py_tm = BacktrackingTM(numberOfCols, cellsPerColumn, + initialPerm, connectedPerm, + minThreshold, newSynapseCount, + permanenceInc, permanenceDec, permanenceMax, + globalDecay, activationThreshold, + doPooling, segUpdateValidDuration, + seed=SEED, verbosity=verbosity, + pamLength=pamLength, + maxSeqLength=0 + ) else: - py_tp = TP(numberOfCols, cellsPerColumn, - initialPerm, connectedPerm, - minThreshold, newSynapseCount, - permanenceInc, permanenceDec, permanenceMax, - globalDecay, activationThreshold, - doPooling, segUpdateValidDuration, - seed=SEED, verbosity=verbosity, - pamLength=pamLength, - ) + py_tm = BacktrackingTM(numberOfCols, cellsPerColumn, + initialPerm, connectedPerm, + minThreshold, newSynapseCount, + permanenceInc, permanenceDec, permanenceMax, + globalDecay, activationThreshold, + doPooling, segUpdateValidDuration, + seed=SEED, verbosity=verbosity, + pamLength=pamLength, + ) trainingSequences = trainingSequences[0] if testSequences == None: testSequences = trainingSequences @@ -989,9 +995,9 @@ def testSequence(trainingSequences, if VERBOSITY > 1: print "============= New sequence =================" if doResets: - tp.reset() + tm.reset() if compareToPy: - py_tp.reset() + py_tm.reset() for t,x in enumerate(trainingSequence): if noiseModel is not None and \ 'xor' in noiseModel and 'binomial' in noiseModel \ @@ -1000,28 +1006,28 @@ def testSequence(trainingSequences, x = logical_xor(x, noise_vector) if VERBOSITY > 2: print "Time step",t, "learning round",r, "sequence number", sequenceNum - print "Input: ",tp.printInput(x) + print "Input: 
",tm.printInput(x) print "NNZ:", x.nonzero() x = numpy.array(x).astype('float32') - y = tp.learn(x) + y = tm.learn(x) if compareToPy: - py_y = py_tp.learn(x) + py_y = py_tm.learn(x) if t % 25 == 0: # To track bugs, do that every iteration, but very slow - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True if VERBOSITY > 3: - tp.printStates(printPrevious = (VERBOSITY > 4)) + tm.printStates(printPrevious = (VERBOSITY > 4)) print if VERBOSITY > 3: print "Sequence finished. Complete state after sequence" - tp.printCells() + tm.printCells() print numPerfectAtHub = 0 if compareToPy: print "End of training" - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True #-------------------------------------------------------------------------------- # Infer @@ -1032,9 +1038,9 @@ def testSequence(trainingSequences, if VERBOSITY > 1: print "============= New sequence =================" if doResets: - tp.reset() + tm.reset() if compareToPy: - py_tp.reset() + py_tm.reset() slen = len(testSequence) @@ -1047,33 +1053,33 @@ def testSequence(trainingSequences, noise_vector = rgen.binomial(len(x), noiseLevel, (len(x))) x = logical_xor(x, noise_vector) - if VERBOSITY > 2: print "Time step",t, '\nInput:', tp.printInput(x) + if VERBOSITY > 2: print "Time step",t, '\nInput:', tm.printInput(x) x = numpy.array(x).astype('float32') - y = tp.infer(x) + y = tm.infer(x) if compareToPy: - py_y = py_tp.infer(x) - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + py_y = py_tm.infer(x) + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True # if t == predJustAfterHubOnly: # z = sum(y, axis = 1) # print '\t\t', # print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z))) - if VERBOSITY > 3: tp.printStates(printPrevious = (VERBOSITY > 4), + if VERBOSITY > 3: tm.printStates(printPrevious = (VERBOSITY > 4), printLearnState = False); print if nMultiStepPrediction > 0: - y_ms = tp.predict(nSteps=nMultiStepPrediction) + y_ms = tm.predict(nSteps=nMultiStepPrediction) if VERBOSITY > 3: print "Multi step prediction at Time step", t for i in range(nMultiStepPrediction): print "Prediction at t+", i+1 - tp.printColConfidence(y_ms[i]) + tm.printColConfidence(y_ms[i]) # Error Checking for i in range(nMultiStepPrediction): @@ -1125,10 +1131,10 @@ def testSequence(trainingSequences, # nAcceptable is used to reduce the number of automatically determined # acceptable patterns. 
if inferAcceptablePatterns: - acceptablePatterns = findAcceptablePatterns(tp, t, s, testSequences, + acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences, nAcceptable) - scores = tp.checkPrediction2([pattern.nonzero()[0] \ + scores = tm.checkPrediction2([pattern.nonzero()[0] \ for pattern in acceptablePatterns]) falsePositives, falseNegatives = scores[0], scores[1] @@ -1167,7 +1173,7 @@ def testSequence(trainingSequences, print '\t\t',; printOneTrainingVector(p) print 'Output' diagnostic = '' - output = sum(tp.currentOutput,axis=1) + output = sum(tm.currentOutput,axis=1) print '\t\t',; printOneTrainingVector(output) else: @@ -1177,9 +1183,9 @@ def testSequence(trainingSequences, numPerfectAtHub += 1 if predJustAfterHubOnly is None: - return numFailures, numStrictErrors, numPerfect, tp + return numFailures, numStrictErrors, numPerfect, tm else: - return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tp + return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tm @@ -1202,7 +1208,7 @@ def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1247,7 +1253,7 @@ def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = 4, numberOfCols = numCols, @@ -1299,7 +1305,7 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): minOnes = 15, maxOnes = 20) # Do one pass through the training set - numFailures1, numStrictErrors1, numPerfect1, tp1 = \ + numFailures1, numStrictErrors1, numPerfect1, tm1 = \ testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1315,7 +1321,7 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): activationThreshold = 8) # Do two passes through the training set - numFailures, numStrictErrors, numPerfect, tp2 = \ + numFailures, numStrictErrors, numPerfect, tm2 = \ testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1331,8 +1337,8 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): activationThreshold = 8) # Check that training with a second pass did not result in more synapses - segmentInfo1 = tp1.getSegmentInfo() - segmentInfo2 = tp2.getSegmentInfo() + segmentInfo1 = tm1.getSegmentInfo() + segmentInfo2 = tm2.getSegmentInfo() if (segmentInfo1[0] != segmentInfo2[0]) or \ (segmentInfo1[1] != segmentInfo2[1]) : print "Training twice incorrectly resulted in more segments or synapses" @@ -1371,7 +1377,7 @@ def TestB3(numUniquePatterns, nTests): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1407,7 +1413,7 @@ def TestH0(numOnes = 5,nMultiStepPrediction=0): trainingSet = buildSimpleTrainingSet(numOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = 20, numberOfCols = trainingSet[0][0][0].size, @@ -1461,7 +1467,7 @@ def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2], numCols = numCols, minOnes = 21, maxOnes = 25) - 
numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1506,7 +1512,7 @@ def TestH11(numOnes = 3): trainingSet = buildAlternatingTrainingSet(numOnes= 3) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = 1, numberOfCols = trainingSet[0][0][0].size, @@ -1569,7 +1575,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 10 ======================" - numFailures3, numStrictErrors3, numPerfect3, tp3 = \ + numFailures3, numStrictErrors3, numPerfect3, tm3 = \ testSequence(trainingSet, nTrainingReps = 10, numberOfCols = numCols, @@ -1588,7 +1594,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 2 ======================" - numFailures, numStrictErrors, numPerfect, tp2 = \ + numFailures, numStrictErrors, numPerfect, tm2 = \ testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1607,7 +1613,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 1 ======================" - numFailures1, numStrictErrors1, numPerfect1, tp1 = \ + numFailures1, numStrictErrors1, numPerfect1, tm1 = \ testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1625,16 +1631,16 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 shouldFail = shouldFail) # Check that training with a second pass did not result in more synapses - segmentInfo1 = tp1.getSegmentInfo() - segmentInfo2 = tp2.getSegmentInfo() + segmentInfo1 = tm1.getSegmentInfo() + segmentInfo2 = tm2.getSegmentInfo() if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \ (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) : print "Training twice incorrectly resulted in too many segments or synapses" print segmentInfo1 print segmentInfo2 - print tp3.getSegmentInfo() - tp3.trimSegments() - print tp3.getSegmentInfo() + print tm3.getSegmentInfo() + tm3.trimSegments() + print tm3.getSegmentInfo() print "Failures for 1, 2, and N reps" print numFailures1, numStrictErrors1, numPerfect1 @@ -1690,7 +1696,7 @@ def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2], numCols = numCols, minOnes = minOnes, maxOnes = maxOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1732,7 +1738,7 @@ def TestHL0a(numOnes = 5): trainingSet, testSet = buildHL0aTrainingSet() numCols = trainingSet[0][0].size - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence([trainingSet], nTrainingReps = 1, numberOfCols = numCols, @@ -1750,8 +1756,8 @@ def TestHL0a(numOnes = 5): doPooling = False, testSequences = testSet) - tp.trimSegments() - retAfter = tp.getSegmentInfo() + tm.trimSegments() + retAfter = tm.getSegmentInfo() print retAfter[0], retAfter[1] if retAfter[0] > 20: print "Too many segments" @@ -1784,7 +1790,7 @@ def TestHL0b(numOnes = 5): numCols = trainingSet[0][0].size print "numCols=", numCols - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence([trainingSet], nTrainingReps = 1, numberOfCols = numCols, @@ -1801,9 +1807,9 @@ def TestHL0b(numOnes = 5): doPooling = False, 
testSequences = testSet) - tp.trimSegments() - retAfter = tp.getSegmentInfo() - tp.printCells() + tm.trimSegments() + retAfter = tm.getSegmentInfo() + tm.printCells() if numFailures == 0: print "Test HL0 ok" @@ -1852,7 +1858,7 @@ def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2] numCols = numCols, minOnes = minOnes, maxOnes = maxOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1904,7 +1910,7 @@ def worker(x): numCols = numCols, minOnes = 21, maxOnes = 25) - numFailures1, numStrictErrors1, numPerfect1, atHub, tp = \ + numFailures1, numStrictErrors1, numPerfect1, atHub, tm = \ testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1930,7 +1936,7 @@ def worker(x): numCols = numCols, minOnes = 21, maxOnes = 25) - numFailures2, numStrictErrors2, numPerfect2, tp = \ + numFailures2, numStrictErrors2, numPerfect2, tm = \ testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -2255,9 +2261,9 @@ def runTests(testLength = "short"): if __name__=="__main__": - if not TEST_CPP_TP: + if not TEST_CPP_TM: print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - print "!! WARNING: C++ TP testing is DISABLED until it can be updated." + print "!! WARNING: C++ TM testing is DISABLED until it can be updated." print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # Three different test lengths are passed in through the command line. @@ -2279,7 +2285,7 @@ def runTests(testLength = "short"): if 'verbosity' in arg: VERBOSITY = int(sys.argv[i+1]) if 'help' in arg: - print "TPTest.py --short|long --seed number|'rand' --verbosity number" + print "TMTest.py --short|long --seed number|'rand' --verbosity number" sys.exit() if "short" in arg: testLength = "short" @@ -2300,13 +2306,13 @@ def runTests(testLength = "short"): numUniquePatterns = 100 nTests = 3 - print "TP tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests, + print "TM tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests, print "seed=", SEED print if testLength == "long": - print 'Testing Python TP' - TPClass = TP + print 'Testing BacktrackingTM' + TMClass = BacktrackingTM runTests(testLength) if testLength != 'long': @@ -2316,7 +2322,7 @@ def runTests(testLength = "short"): # Temporarily turned off so we can investigate checkSynapseConsistency = False - if TEST_CPP_TP: - print 'Testing C++ TP' - TPClass = TP10X2 + if TEST_CPP_TM: + print 'Testing C++ TM' + TMClass = BacktrackingTMCPP runTests(testLength) diff --git a/scripts/profiling/tp_profile.py b/scripts/profiling/tm_profile.py similarity index 74% rename from scripts/profiling/tp_profile.py rename to scripts/profiling/tm_profile.py index 96402fbc62..ea99045306 100644 --- a/scripts/profiling/tp_profile.py +++ b/scripts/profiling/tm_profile.py @@ -19,31 +19,31 @@ # http://numenta.org/licenses/ # ---------------------------------------------------------------------- -## run python -m cProfile --sort cumtime $NUPIC/scripts/profiling/tp_profile.py [nColumns nEpochs] +## run python -m cProfile --sort cumtime $NUPIC/scripts/profiling/tm_profile.py [nColumns nEpochs] import sys import numpy -# chose desired TP implementation to compare: -from nupic.research.TP10X2 import TP10X2 as CppTP -from nupic.research.TP import TP as PyTP +# chose desired TM implementation to compare: +from 
nupic.research.BacktrackingTMCPP import BacktrackingTMCPP as CppTM +from nupic.research.BacktrackingTM import BacktrackingTM as PyTM -def profileTP(tpClass, tpDim, nRuns): +def profileTM(tmClass, tmDim, nRuns): """ - profiling performance of TemporalPooler (TP) + profiling performance of TemporalMemory (TM) using the python cProfile module and ordered by cumulative time, see how to run on command-line above. - @param tpClass implementation of TP (cpp, py, ..) - @param tpDim number of columns in TP + @param tmClass implementation of TM (cpp, py, ..) + @param tmDim number of columns in TM @param nRuns number of calls of the profiled code (epochs) """ - # create TP instance to measure - tp = tpClass(numberOfCols=tpDim) + # create TM instance to measure + tm = tmClass(numberOfCols=tmDim) # generate input data - data = numpy.random.randint(0, 2, [tpDim, nRuns]).astype('float32') + data = numpy.random.randint(0, 2, [tmDim, nRuns]).astype('float32') for i in xrange(nRuns): # new data every time, this is the worst case performance @@ -51,7 +51,7 @@ def profileTP(tpClass, tpDim, nRuns): d = data[:,i] # the actual function to profile! - tp.compute(d, True) + tm.compute(d, True) @@ -63,4 +63,4 @@ def profileTP(tpClass, tpDim, nRuns): columns=int(sys.argv[1]) epochs=int(sys.argv[2]) - profileTP(CppTP, columns, epochs) + profileTM(CppTM, columns, epochs) diff --git a/scripts/temporal_memory_performance_benchmark.py b/scripts/temporal_memory_performance_benchmark.py index 13987b14c1..a917bd0760 100755 --- a/scripts/temporal_memory_performance_benchmark.py +++ b/scripts/temporal_memory_performance_benchmark.py @@ -318,17 +318,17 @@ def tpComputeFn(instance, encoding, activeBits): name="tm_py") if "tp_py" in args.implementations: - import nupic.research.TP + import nupic.research.BacktrackingTM benchmark.addContestant( - nupic.research.TP.TP, + nupic.research.BacktrackingTM.BacktrackingTM, paramsFn=tmParamsFn, computeFn=tpComputeFn, name="tp_py") if "tp_cpp" in args.implementations: - import nupic.research.TP10X2 + import nupic.research.BacktrackingTMCPP benchmark.addContestant( - nupic.research.TP10X2.TP10X2, + nupic.research.BacktrackingTMCPP.BacktrackingTMCPP, paramsFn=tmParamsFn, computeFn=tpComputeFn, name="tp_cpp") diff --git a/src/nupic/algorithms/KNNClassifier.py b/src/nupic/algorithms/KNNClassifier.py index 2b27ac06d8..4ab5853e8a 100755 --- a/src/nupic/algorithms/KNNClassifier.py +++ b/src/nupic/algorithms/KNNClassifier.py @@ -150,7 +150,7 @@ class labels even if distThreshold is 0. Should be True for online learning :param cellsPerCol: (int) If >= 1, input is assumed to be organized into - columns, in the same manner as the temporal pooler AND whenever a new + columns, in the same manner as the temporal memory AND whenever a new prototype is stored, only the start cell (first cell) is stored in any bursting column diff --git a/src/nupic/data/generators/data_generator.py b/src/nupic/data/generators/data_generator.py index 0927aa0476..96c9b64ab7 100644 --- a/src/nupic/data/generators/data_generator.py +++ b/src/nupic/data/generators/data_generator.py @@ -33,7 +33,7 @@ class DataGenerator(): """The DataGenerator provides a framework for generating, encoding, saving and exporting records. Each column of the output contains records with a specific set of parameters such as encoderType, n, w, etc. This interface - is intended to be used for testing the spatial pooler, temporal pooler and + is intended to be used for testing the spatial pooler, temporal memory and for generating artificial datasets. 
""" diff --git a/src/nupic/engine/__init__.py b/src/nupic/engine/__init__.py index 0817a61461..b3bb89f318 100644 --- a/src/nupic/engine/__init__.py +++ b/src/nupic/engine/__init__.py @@ -54,7 +54,7 @@ ("nupic.regions.SPRegion", "SPRegion"), ("nupic.regions.SVMClassifierNode", "SVMClassifierNode"), ("nupic.regions.TestRegion", "TestRegion"), - ("nupic.regions.TPRegion", "TPRegion"), + ("nupic.regions.TMRegion", "TMRegion"), ("nupic.regions.UnimportableNode", "UnimportableNode"), ) diff --git a/src/nupic/frameworks/opf/htm_prediction_model.py b/src/nupic/frameworks/opf/htm_prediction_model.py index 514cffdc7c..17d0f112a8 100644 --- a/src/nupic/frameworks/opf/htm_prediction_model.py +++ b/src/nupic/frameworks/opf/htm_prediction_model.py @@ -142,9 +142,9 @@ def __init__(self, are passed to the spatial pooler. trainSPNetOnlyIfRequested: If set, don't create an SP network unless the user requests SP metrics. - tmEnable: Whether to use a temporal pooler. - tmParams: A dictionary specifying the temporal pooler parameters. These - are passed to the temporal pooler. + tmEnable: Whether to use a temporal memory. + tmParams: A dictionary specifying the temporal memory parameters. These + are passed to the temporal memory. clEnable: Whether to use the classifier. If false, the classifier will not be created and no predictions will be generated. clParams: A dictionary specifying the classifier parameters. These are @@ -177,11 +177,11 @@ def __init__(self, self._maxPredictionsPerStep = maxPredictionsPerStep # set up learning parameters (note: these may be replaced via - # enable/disable//SP/TP//Learning methods) + # enable/disable//SP/TM//Learning methods) self.__spLearningEnabled = bool(spEnable) self.__tpLearningEnabled = bool(tmEnable) - # Explicitly exclude the TP if this type of inference doesn't require it + # Explicitly exclude the TM if this type of inference doesn't require it if not InferenceType.isTemporal(self.getInferenceType()) \ or self.getInferenceType() == InferenceType.NontemporalMultiStep: tmEnable = False @@ -246,7 +246,7 @@ def resetSequenceStates(self): """ if self._hasTP: - # Reset TP's sequence states + # Reset TM's sequence states self._getTPRegion().executeCommand(['resetSequenceStates']) self.__logger.debug("HTMPredictionModel.resetSequenceStates(): reset temporal " @@ -273,10 +273,10 @@ def finishLearning(self): "HTMPredictionModel.finishLearning(): finished SP learning") if self._hasTP: - # Finish temporal network's TP learning + # Finish temporal network's TM learning self._getTPRegion().executeCommand(['finishLearning']) self.__logger.debug( - "HTMPredictionModel.finishLearning(): finished TP learning") + "HTMPredictionModel.finishLearning(): finished TM learning") self.__spLearningEnabled = self.__tpLearningEnabled = False self.__finishedLearning = True @@ -490,8 +490,8 @@ def _spCompute(self): def _tpCompute(self): - tp = self._getTPRegion() - if tp is None: + tm = self._getTPRegion() + if tm is None: return if (self.getInferenceType() == InferenceType.TemporalAnomaly or @@ -500,12 +500,12 @@ def _tpCompute(self): else: topDownCompute = False - tp = self._getTPRegion() - tp.setParameter('topDownMode', topDownCompute) - tp.setParameter('inferenceMode', self.isInferenceEnabled()) - tp.setParameter('learningMode', self.isLearningEnabled()) - tp.prepareInputs() - tp.compute() + tm = self._getTPRegion() + tm.setParameter('topDownMode', topDownCompute) + tm.setParameter('inferenceMode', self.isInferenceEnabled()) + tm.setParameter('learningMode', self.isLearningEnabled()) + 
tm.prepareInputs() + tm.compute() def _isReconstructionModel(self): @@ -534,8 +534,8 @@ def _isClassificationModel(self): def _multiStepCompute(self, rawInput): patternNZ = None if self._getTPRegion() is not None: - tp = self._getTPRegion() - tpOutput = tp.getSelf()._tfdr.infActiveState['t'] + tm = self._getTPRegion() + tpOutput = tm.getSelf()._tfdr.infActiveState['t'] patternNZ = tpOutput.reshape(-1).nonzero()[0] elif self._getSPRegion() is not None: sp = self._getSPRegion() @@ -547,7 +547,7 @@ def _multiStepCompute(self, rawInput): patternNZ = sensorOutput.nonzero()[0] else: raise RuntimeError("Attempted to make multistep prediction without" - "TP, SP, or Sensor regions") + "TM, SP, or Sensor regions") inputTSRecordIdx = rawInput.get('_timestampRecordIdx') return self._handleCLAClassifierMultiStep( @@ -627,7 +627,7 @@ def _anomalyCompute(self): score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ? elif inferenceType == InferenceType.TemporalAnomaly: - tp = self._getTPRegion() + tm = self._getTPRegion() if sp is not None: activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0] @@ -642,7 +642,7 @@ def _anomalyCompute(self): ) # Calculate the anomaly score using the active columns # and previous predicted columns. - score = tp.getOutputData("anomalyScore")[0] + score = tm.getOutputData("anomalyScore")[0] # Calculate the classifier's output and use the result as the anomaly # label. Stores as string of results. @@ -667,7 +667,7 @@ def _handleCLAClassifierMultiStep(self, patternNZ, """ Handle the CLA Classifier compute logic when implementing multi-step prediction. This is where the patternNZ is associated with one of the other fields from the dataset 0 to N steps in the future. This method is - used by each type of network (encoder only, SP only, SP +TP) to handle the + used by each type of network (encoder only, SP only, SP +TM) to handle the compute logic through the CLA Classifier. It fills in the inference dict with the results of the compute. @@ -1008,9 +1008,9 @@ def _getSPRegion(self): def _getTPRegion(self): """ - Returns reference to the network's TP region + Returns reference to the network's TM region """ - return self._netInfo.net.regions.get('TP', None) + return self._netInfo.net.regions.get('TM', None) def _getSensorRegion(self): @@ -1082,8 +1082,8 @@ def __createCLANetwork(self, sensorParams, spEnable, spParams, tmEnable, enabledEncoders.pop(name) # Disabled encoders are encoders that are fed to CLAClassifierRegion but not - # SP or TP Regions. This is to handle the case where the predicted field - # is not fed through the SP/TP. We typically just have one of these now. + # SP or TM Regions. This is to handle the case where the predicted field + # is not fed through the SP/TM. We typically just have one of these now. 
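A side effect worth calling out in the `_getTPRegion()` hunk above: the region is now added and looked up under the key "TM", so any caller still asking for "TP" silently gets None. A minimal migration sketch (the helper name and the dual lookup are illustrative, not part of this patch):

```python
def getTemporalRegion(net):
    """Hypothetical helper: fetch the temporal region from a nupic.engine
    Network, accepting both the post-rename name ("TM") and the name used
    by checkpoints serialized before this patch ("TP").
    """
    region = net.regions.get("TM", None)
    if region is None:
        region = net.regions.get("TP", None)
    return region
```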
disabledEncoders = copy.deepcopy(sensorParams['encoders']) for name, params in disabledEncoders.items(): if params is None: @@ -1130,21 +1130,21 @@ def __createCLANetwork(self, sensorParams, spEnable, spParams, tmEnable, assert tmParams['columnCount'] == prevRegionWidth tmParams['inputWidth'] = tmParams['columnCount'] - self.__logger.debug("Adding TPRegion; tmParams: %r" % tmParams) - n.addRegion("TP", "py.TPRegion", json.dumps(tmParams)) + self.__logger.debug("Adding TMRegion; tmParams: %r" % tmParams) + n.addRegion("TM", "py.TMRegion", json.dumps(tmParams)) - # Link TP region - n.link(prevRegion, "TP", "UniformLink", "") + # Link TM region + n.link(prevRegion, "TM", "UniformLink", "") if prevRegion != "sensor": - n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut", + n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn") else: - n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut", + n.link("TM", prevRegion, "UniformLink", "", srcOutput="topDownOut", destInput="temporalTopDownIn") - n.link("sensor", "TP", "UniformLink", "", srcOutput="resetOut", + n.link("sensor", "TM", "UniformLink", "", srcOutput="resetOut", destInput="resetIn") - prevRegion = "TP" + prevRegion = "TM" prevRegionWidth = tmParams['inputWidth'] if clEnable and clParams is not None: @@ -1303,7 +1303,7 @@ def read(cls, proto): network = Network.read(proto.network) spEnable = ("SP" in network.regions) - tmEnable = ("TP" in network.regions) + tmEnable = ("TM" in network.regions) clEnable = ("Classifier" in network.regions) model = cls(spEnable=spEnable, @@ -1412,7 +1412,7 @@ def _deSerializeExtraData(self, extraDataDir): self._classifier_helper.saved_categories) self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion - # Set TP to output neccessary information + # Set TM to output neccessary information self._getTPRegion().setParameter('anomalyMode', True) # Remove old classifier_helper @@ -1439,7 +1439,7 @@ def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable): network - network to add the AnomalyClassifier region params - parameters to pass to the region spEnable - True if network has an SP region - tmEnable - True if network has a TP region; Currently requires True + tmEnable - True if network has a TM region; Currently requires True """ allParams = copy.deepcopy(params) @@ -1475,14 +1475,14 @@ def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable): network.link("sensor", "AnomalyClassifier", "UniformLink", "", srcOutput="dataOut", destInput="spBottomUpOut") - # Attach link to TP + # Attach link to TM if tmEnable: - network.link("TP", "AnomalyClassifier", "UniformLink", "", + network.link("TM", "AnomalyClassifier", "UniformLink", "", srcOutput="topDownOut", destInput="tpTopDownOut") - network.link("TP", "AnomalyClassifier", "UniformLink", "", + network.link("TM", "AnomalyClassifier", "UniformLink", "", srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT") else: - raise RuntimeError("TemporalAnomaly models require a TP region.") + raise RuntimeError("TemporalAnomaly models require a TM region.") def __getNetworkStateDirectory(self, extraDataDir): diff --git a/src/nupic/frameworks/opf/htm_prediction_model_callbacks.py b/src/nupic/frameworks/opf/htm_prediction_model_callbacks.py index 1ad4b1b060..75800fd604 100644 --- a/src/nupic/frameworks/opf/htm_prediction_model_callbacks.py +++ b/src/nupic/frameworks/opf/htm_prediction_model_callbacks.py @@ -85,7 +85,7 @@ def 
htmPredictionModelControlEnableTPLearningCb(htmPredictionModel): def htmPredictionModelControlDisableTPLearningCb(htmPredictionModel): """ Disables learning in the HTMPredictionModel's Temporal Pooler, while - retaining the ability to re-enable TP learning in the future. + retaining the ability to re-enable TM learning in the future. See also: htmPredictionModelControlEnableTPLearningCb. See also: model_callbacks.modelControlFinishLearningCb. @@ -143,11 +143,11 @@ def __call__(self, htmPredictionModel): class HTMPredictionModelPickleTPInitArgs(object): - """ Saves TP10X2 initialization args + """ Saves BacktrackingTMCPP initialization args """ def __init__(self, filePath): """ - filePath: path of file where TP __init__ args are to be saved + filePath: path of file where TM __init__ args are to be saved """ self.__filePath = filePath @@ -159,7 +159,7 @@ def __call__(self, htmPredictionModel): import pickle - # Get the TP args dictionary + # Get the TM args dictionary assert isinstance(htmPredictionModel, HTMPredictionModel) tpRegion = htmPredictionModel._getTPRegion().getSelf() diff --git a/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py b/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py index 5a95d4b91b..e01fa1db68 100644 --- a/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py +++ b/src/nupic/frameworks/opf/htm_prediction_model_classifier_helper.py @@ -43,7 +43,7 @@ class HTMPredictionModelClassifierHelper(object): This class implements a record classifier used to classify prediction records. It currently depends on the KNN classifier within the parent model. - Currently it is classifying based on SP / TP properties and has a sliding + Currently it is classifying based on SP / TM properties and has a sliding window of 1000 records. The model should call the compute() method for each iteration that will be @@ -442,12 +442,12 @@ def _constructClassificationRecord(self): htm_prediction_model of this classifier. 
***This will look into the internals of the model and may depend on the - SP, TP, and KNNClassifier*** + SP, TM, and KNNClassifier*** """ model = self.htm_prediction_model sp = model._getSPRegion() - tp = model._getTPRegion() - tpImp = tp.getSelf()._tfdr + tm = model._getTPRegion() + tpImp = tm.getSelf()._tfdr # Count the number of unpredicted columns activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0] @@ -455,19 +455,19 @@ def _constructClassificationRecord(self): score = (self._activeColumnCount - score)/float(self._activeColumnCount) spSize = sp.getParameter('activeOutputCount') - tpSize = tp.getParameter('cellsPerColumn') * tp.getParameter('columnCount') + tpSize = tm.getParameter('cellsPerColumn') * tm.getParameter('columnCount') classificationVector = numpy.array([]) if self._vectorType == 'tpc': - # Classification Vector: [---TP Cells---] + # Classification Vector: [---TM Cells---] classificationVector = numpy.zeros(tpSize) activeCellMatrix = tpImp.getLearnActiveStateT().reshape(tpSize, 1) activeCellIdx = numpy.where(activeCellMatrix > 0)[0] if activeCellIdx.shape[0] > 0: classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1 elif self._vectorType == 'sp_tpe': - # Classification Vecotr: [---SP---|---(TP-SP)----] + # Classification Vector: [---SP---|---(TM-SP)----] classificationVector = numpy.zeros(spSize+spSize) if activeColumns.shape[0] > 0: classificationVector[activeColumns] = 1.0 @@ -483,7 +483,7 @@ def _constructClassificationRecord(self): # Store the state for next time step numPredictedCols = len(self._prevPredictedColumns) - predictedColumns = tp.getOutputData("topDownOut").nonzero()[0] + predictedColumns = tm.getOutputData("topDownOut").nonzero()[0] self._prevPredictedColumns = copy.deepcopy(predictedColumns) if self._anomalyVectorLength is None: diff --git a/src/nupic/frameworks/opf/opf_utils.py b/src/nupic/frameworks/opf/opf_utils.py index 5b0ca2ef03..8f7023de30 100644 --- a/src/nupic/frameworks/opf/opf_utils.py +++ b/src/nupic/frameworks/opf/opf_utils.py @@ -169,7 +169,7 @@ class InferenceType(Enum("TemporalNextStep", @staticmethod def isTemporal(inferenceType): """ Returns True if the inference type is 'temporal', i.e. requires a - temporal pooler in the network. + temporal memory in the network. """ if InferenceType.__temporalInferenceTypes is None: InferenceType.__temporalInferenceTypes = \ diff --git a/src/nupic/regions/KNNAnomalyClassifierRegion.py b/src/nupic/regions/KNNAnomalyClassifierRegion.py index d921476d7b..9ce73618b7 100644 --- a/src/nupic/regions/KNNAnomalyClassifierRegion.py +++ b/src/nupic/regions/KNNAnomalyClassifierRegion.py @@ -85,7 +85,7 @@ def getSpec(cls): requireSplitterMap=False), tpLrnActiveStateT=dict( - description="""Active cells in the learn state at time T from TP. + description="""Active cells in the learn state at time T from TM. This is used to classify on.""", dataType='Real32', count=0, @@ -135,7 +135,7 @@ def getSpec(cls): classificationVectorType=dict( description="""Vector type to use when classifying. - 1 - Vector Column with Difference (TP and SP) + 1 - Vector Column with Difference (TM and SP) """, dataType='UInt32', count=1, @@ -400,8 +400,8 @@ def constructClassificationRecord(self, inputs): passed in through the inputs.
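For reference, the 'tpc' branch of `_constructClassificationRecord()` above reduces to a few lines once the renamed handle is in hand. A standalone sketch (hypothetical helper; it uses the same calls as the patched code):

```python
import numpy

def buildTMCellVector(tm):
    """Build the [---TM Cells---] classification vector from a TM region
    handle, mirroring the 'tpc' branch above: one bit per TM cell, set for
    cells that are in the learn-active state at time t.
    """
    tmSize = tm.getParameter('cellsPerColumn') * tm.getParameter('columnCount')
    vector = numpy.zeros(tmSize)
    activeCellMatrix = tm.getSelf()._tfdr.getLearnActiveStateT().reshape(tmSize, 1)
    activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
    if activeCellIdx.shape[0] > 0:
        vector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
    return vector
```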
Types for self.classificationVectorType: - 1 - TP active cells in learn state - 2 - SP columns concatenated with error from TP column predictions and SP + 1 - TM active cells in learn state + 2 - SP columns concatenated with error from TM column predictions and SP """ # Count the number of unpredicted columns allSPColumns = inputs["spBottomUpOut"] @@ -419,14 +419,14 @@ def constructClassificationRecord(self, inputs): classificationVector = numpy.array([]) if self.classificationVectorType == 1: - # Classification Vector: [---TP Cells---] + # Classification Vector: [---TM Cells---] classificationVector = numpy.zeros(tpSize) activeCellMatrix = inputs["tpLrnActiveStateT"].reshape(tpSize, 1) activeCellIdx = numpy.where(activeCellMatrix > 0)[0] if activeCellIdx.shape[0] > 0: classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1 elif self.classificationVectorType == 2: - # Classification Vecotr: [---SP---|---(TP-SP)----] + # Classification Vector: [---SP---|---(TM-SP)----] classificationVector = numpy.zeros(spSize+spSize) if activeSPColumns.shape[0] > 0: classificationVector[activeSPColumns] = 1.0 diff --git a/src/nupic/regions/KNNClassifierRegion.py b/src/nupic/regions/KNNClassifierRegion.py index a4526ac141..34e3009575 100644 --- a/src/nupic/regions/KNNClassifierRegion.py +++ b/src/nupic/regions/KNNClassifierRegion.py @@ -423,7 +423,7 @@ def getSpec(cls): cellsPerCol=dict( description='If >= 1, we assume the input is organized into columns, ' - 'in the same manner as the temporal pooler AND ' + 'in the same manner as the temporal memory AND ' 'whenever we store a new prototype, we only store the ' 'start cell (first cell) in any column which is bursting.' 'colum ', diff --git a/src/nupic/regions/RecordSensor.py b/src/nupic/regions/RecordSensor.py index b49333670c..2ed21c856e 100644 --- a/src/nupic/regions/RecordSensor.py +++ b/src/nupic/regions/RecordSensor.py @@ -166,7 +166,7 @@ def getSpec(cls): isDefaultOutput=False), temporalTopDownOut=dict( description="The top-down output signal, generated from " - "feedback from TP through SP", + "feedback from TM through SP", dataType='Real32', count=0, regionLevel=True, @@ -184,7 +184,7 @@ def getSpec(cls): requireSplitterMap=False), temporalTopDownIn=dict( description="The top-down input signal, generated from " - "feedback from TP through SP", + "feedback from TM through SP", dataType='Real32', count=0, required=False, @@ -481,7 +481,7 @@ def compute(self, inputs, outputs): # ======================================================================== ## TODO: Add temporal top-down loop - # We get the temporal pooler's topDownOut passed through the spatial + # We get the temporal memory's topDownOut passed through the spatial # pooler as temporalTopDownIn temporalTopDownIn = inputs['temporalTopDownIn'] temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn) diff --git a/src/nupic/regions/SPRegion.py b/src/nupic/regions/SPRegion.py index 39842c7157..617dba90f4 100644 --- a/src/nupic/regions/SPRegion.py +++ b/src/nupic/regions/SPRegion.py @@ -84,7 +84,7 @@ def _buildArgs(f, self=None, kwargs={}): init = SPRegion.__init__ ourArgNames = [t[0] for t in getArgumentDescriptions(init)] # Also remove a few other names that aren't in our constructor but are - # computed automatically (e.g. numberOfCols for the TP) + # computed automatically (e.g. numberOfCols for the TM) # TODO: where does numberOfCols come into SPRegion?
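The type-2 layout ([---SP---|---(TM-SP)----]) is only partially visible in the hunk above; the second half encodes the TM's column-prediction error. A hedged reconstruction (the error definition here, active-but-unpredicted columns, is inferred from the surrounding code, not quoted from the patch):

```python
import numpy

def buildSPTMErrorVector(activeSPColumns, prevPredictedColumns, spSize):
    """Sketch of classificationVectorType == 2. Inputs are assumed to be
    1-D integer index arrays, e.g. inputs["spBottomUpOut"].nonzero()[0]
    and the TM's previously predicted columns.
    """
    vector = numpy.zeros(2 * spSize)
    if activeSPColumns.shape[0] > 0:
        vector[activeSPColumns] = 1.0
    # Second half: columns that became active without having been predicted
    # by the TM on the previous step (the "(TM-SP)" difference portion).
    errors = numpy.setdiff1d(activeSPColumns, prevPredictedColumns)
    if errors.shape[0] > 0:
        vector[spSize + errors] = 1.0
    return vector
```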
ourArgNames += [ 'numberOfCols', @@ -710,7 +710,7 @@ def getBaseSpec(cls): spatialTopDownOut = dict( description="""The top-down output, generated only from the current SP output. This can be used to evaluate how well the - SP is representing the inputs independent of the TP.""", + SP is representing the inputs independent of the TM.""", dataType='Real32', count=0, regionLevel=True, @@ -718,7 +718,7 @@ def getBaseSpec(cls): temporalTopDownOut = dict( description="""The top-down output, generated only from the current - TP output feedback down through the SP.""", + TM output feedback down through the SP.""", dataType='Real32', count=0, regionLevel=True, diff --git a/src/nupic/regions/TPRegion.py b/src/nupic/regions/TMRegion.py similarity index 93% rename from src/nupic/regions/TPRegion.py rename to src/nupic/regions/TMRegion.py index c6bdbce1c8..bd6123f727 100644 --- a/src/nupic/regions/TPRegion.py +++ b/src/nupic/regions/TMRegion.py @@ -23,9 +23,9 @@ import numpy from nupic.algorithms import anomaly -from nupic.research import TP -from nupic.research import TP10X2 -from nupic.research import TP_shim +from nupic.research import BacktrackingTM +from nupic.research import BacktrackingTMCPP +from nupic.research import TM_shim from nupic.support import getArgumentDescriptions from nupic.bindings.regions.PyRegion import PyRegion @@ -38,17 +38,17 @@ def _getTPClass(temporalImp): """ if temporalImp == 'py': - return TP.TP + return BacktrackingTM.BacktrackingTM elif temporalImp == 'cpp': - return TP10X2.TP10X2 + return BacktrackingTMCPP.BacktrackingTMCPP elif temporalImp == 'tm_py': - return TP_shim.TPShim + return TM_shim.TMShim elif temporalImp == 'tm_cpp': - return TP_shim.TPCPPShim + return TM_shim.TMCPPShim elif temporalImp == 'tm_py_fast': - return TP_shim.FastTPShim + return TM_shim.FastTMShim elif temporalImp == 'monitored_tm_py': - return TP_shim.MonitoredTPShim + return TM_shim.MonitoredTMShim else: raise RuntimeError("Invalid temporalImp '%s'. Legal values are: 'py', " "'cpp', 'tm_py', 'monitored_tm_py'" % (temporalImp)) @@ -62,7 +62,7 @@ def _buildArgs(f, self=None, kwargs={}): Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. - Assigns all arguments to the function as instance variables of TPRegion. + Assigns all arguments to the function as instance variables of TMRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function. @@ -76,12 +76,12 @@ def _buildArgs(f, self=None, kwargs={}): # __init__'s signature will be just (self, *args, **kw), but # _original_init is created with the original signature #init = getattr(self, '_original_init', self.__init__) - init = TPRegion.__init__ + init = TMRegion.__init__ ourArgNames = [t[0] for t in getArgumentDescriptions(init)] # Also remove a few other names that aren't in our constructor but are - # computed automatically (e.g. numberOfCols for the TP) + # computed automatically (e.g. numberOfCols for the TM) ourArgNames += [ - 'numberOfCols', # TP + 'numberOfCols', # TM ] for argTuple in argTuples[:]: if argTuple[0] in ourArgNames: @@ -114,7 +114,7 @@ def _getAdditionalSpecs(temporalImp, kwargs={}): to 'Byte' for None and complex types Determines the spatial parameters based on the selected implementation. - It defaults to TemporalPooler. + It defaults to TemporalMemory. 
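The `_getTPClass()` chain above is the single switch point for all of the renamed implementations. The same mapping in lookup-table form, as a sketch (imports follow this patch's module layout; the function in the patch remains the authoritative version):

```python
from nupic.research import BacktrackingTM, BacktrackingTMCPP, TM_shim

_TM_CLASSES = {
    'py': BacktrackingTM.BacktrackingTM,
    'cpp': BacktrackingTMCPP.BacktrackingTMCPP,
    'tm_py': TM_shim.TMShim,
    'tm_cpp': TM_shim.TMCPPShim,
    'tm_py_fast': TM_shim.FastTMShim,
    'monitored_tm_py': TM_shim.MonitoredTMShim,
}

def getTMClass(temporalImp):
    """Resolve a temporalImp string to a TM implementation class."""
    try:
        return _TM_CLASSES[temporalImp]
    except KeyError:
        raise RuntimeError("Invalid temporalImp %r. Legal values are: %s"
                           % (temporalImp, ", ".join(sorted(_TM_CLASSES))))
```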
Determines the temporal parameters based on the temporalImp """ typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'} @@ -138,7 +138,7 @@ def getConstraints(arg): else: return '' - # Build up parameters from temporal pooler's constructor + # Build up parameters from temporal memory's constructor TemporalClass = _getTPClass(temporalImp) tArgTuples = _buildArgs(TemporalClass.__init__) temporalSpec = {} @@ -168,7 +168,7 @@ def getConstraints(arg): constraints=''), inputWidth=dict( - description='Number of inputs to the TP.', + description='Number of inputs to the TM.', accessMode='Read', dataType='UInt32', count=1, @@ -183,7 +183,7 @@ def getConstraints(arg): orColumnOutputs=dict( description="""OR together the cell outputs from each column to produce - the temporal pooler output. When this mode is enabled, the number of + the temporal memory output. When this mode is enabled, the number of cells per column must also be specified and the output size of the region should be set the same as columnCount""", accessMode='Read', @@ -192,7 +192,7 @@ def getConstraints(arg): constraints='bool'), cellsSavePath=dict( - description="""Optional path to file in which large temporal pooler cells + description="""Optional path to file in which large temporal memory cells data structure is to be saved.""", accessMode='ReadWrite', dataType='Byte', @@ -200,7 +200,7 @@ def getConstraints(arg): constraints=''), temporalImp=dict( - description="""Which temporal pooler implementation to use. Set to either + description="""Which temporal memory implementation to use. Set to either 'py' or 'cpp'. The 'cpp' implementation is optimized for speed in C++.""", accessMode='ReadWrite', dataType='Byte', @@ -278,31 +278,31 @@ def getConstraints(arg): -class TPRegion(PyRegion): +class TMRegion(PyRegion): """ - TPRegion is designed to implement the temporal pooler compute for a given + TMRegion is designed to implement the temporal memory compute for a given CLA level. - Uses a subclass of TP to do most of the work. The specific TP implementation + Uses a subclass of TM to do most of the work. The specific TM implementation is specified using the temporalImp parameter. Automatic parameter handling: Parameter names, default values, and descriptions are retrieved automatically - from the temporal pooler class. Thus, there are only a few hardcoded + from the temporal memory class. Thus, there are only a few hardcoded arguments in __init__, and the rest are passed to the appropriate underlying class. The RegionSpec is mostly built automatically from these parameters. - If you add a parameter to a TP class, it will be exposed through TPRegion - automatically as if it were in TPRegion.__init__, with the right default + If you add a parameter to a TM class, it will be exposed through TMRegion + automatically as if it were in TMRegion.__init__, with the right default value. Add an entry in the __init__ docstring for it too, and that will be - brought into the RegionSpec. TPRegion will maintain the parameter as its own - instance variable and also pass it to the temporal pooler instance. If the - parameter is changed, TPRegion will propagate the change. + brought into the RegionSpec. TMRegion will maintain the parameter as its own + instance variable and also pass it to the temporal memory instance. If the + parameter is changed, TMRegion will propagate the change. 
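The automatic parameter handling described here boils down to introspecting the chosen TM class's constructor. A condensed sketch, assuming `getArgumentDescriptions()` returns the (name, description, defaultValue) 3-tuples documented in `_buildArgs()` (the real code additionally handles constraints, tuple defaults, and region-managed arguments):

```python
from nupic.support import getArgumentDescriptions

def sketchTemporalSpec(TemporalClass):
    """Condensed view of how the temporal half of the RegionSpec is built."""
    typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool'}
    spec = {}
    for name, description, defaultValue in getArgumentDescriptions(
            TemporalClass.__init__):
        spec[name] = dict(description=description,
                          accessMode='ReadWrite',
                          # None and complex types fall back to 'Byte'.
                          dataType=typeNames.get(type(defaultValue), 'Byte'),
                          count=1)
    return spec
```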
If you want to do something different with the parameter, add it as an - argument into TPRegion.__init__, which will override all the default handling. + argument into TMRegion.__init__, which will override all the default handling. """ @@ -313,7 +313,7 @@ def __init__(self, cellsPerColumn, # Number of cells per column, required # Constructor arguments are picked up automatically. There is no - # need to add them anywhere in TPRegion, unless you need to do + # need to add them anywhere in TMRegion, unless you need to do # something special with them. See docstring above. orColumnOutputs=False, @@ -329,7 +329,7 @@ def __init__(self, # Make a list of automatic temporal arg names for later use # Pull out the temporal arguments automatically - # These calls whittle down kwargs and create instance variables of TPRegion + # These calls whittle down kwargs and create instance variables of TMRegion tArgTuples = _buildArgs(TemporalClass.__init__, self, kwargs) self._temporalArgNames = [t[0] for t in tArgTuples] @@ -355,7 +355,7 @@ def __init__(self, self.breakPdb = False self.breakKomodo = False - # TPRegion only, or special handling + # TMRegion only, or special handling self.orColumnOutputs = orColumnOutputs self.temporalImp = temporalImp @@ -415,7 +415,7 @@ class the opportunity to do the same by invoking the def initialize(self): - # Allocate appropriate temporal pooler object + # Allocate appropriate temporal memory object # Retrieve the necessary extra arguments that were handled automatically autoArgs = dict((name, getattr(self, name)) for name in self._temporalArgNames) @@ -444,7 +444,7 @@ def initialize(self): ############################################################################# def compute(self, inputs, outputs): """ - Run one iteration of TPRegion's compute, profiling it if requested. + Run one iteration of TMRegion's compute, profiling it if requested. The guts of the compute are contained in the _compute() call so that we can profile it if requested. @@ -481,7 +481,7 @@ def compute(self, inputs, outputs): def _compute(self, inputs, outputs): """ - Run one iteration of TPRegion's compute + Run one iteration of TMRegion's compute """ #if self.topDownMode and (not 'topDownIn' in inputs): @@ -489,7 +489,7 @@ def _compute(self, inputs, outputs): # "topDownMode is True") if self._tfdr is None: - raise RuntimeError("TP has not been initialized") + raise RuntimeError("TM has not been initialized") # Conditional compute break self._conditionalBreak() @@ -522,7 +522,7 @@ def _compute(self, inputs, outputs): tpOutput= tpOutput.reshape(self.columnCount, self.cellsPerColumn).max(axis=1) - # Direct logging of non-zero TP outputs + # Direct logging of non-zero TM outputs if self._fpLogTPOutput: output = tpOutput.reshape(-1) outputNZ = tpOutput.nonzero()[0] @@ -567,12 +567,12 @@ def _compute(self, inputs, outputs): ############################################################################# @classmethod def getBaseSpec(cls): - """Return the base Spec for TPRegion. + """Return the base Spec for TMRegion. Doesn't include the spatial, temporal and other parameters """ spec = dict( - description=TPRegion.__doc__, + description=TMRegion.__doc__, singleNodeOnly=True, inputs=dict( bottomUpIn=dict( @@ -683,7 +683,7 @@ def getBaseSpec(cls): @classmethod def getSpec(cls): - """Return the Spec for TPRegion. + """Return the Spec for TMRegion. 
The parameters collection is constructed based on the parameters specified by the variosu components (spatialSpec, temporalSpec and otherSpec) @@ -751,7 +751,7 @@ def resetSequenceStates(self): """ Resets the region's sequence states """ #print "#############" - #print "############# TPRegion: got resetSequenceStates() call" + #print "############# TMRegion: got resetSequenceStates() call" #print "#############" self._tfdr.reset() @@ -764,7 +764,7 @@ def finishLearning(self): all potential inputs to each column. """ if self._tfdr is None: - raise RuntimeError("Temporal pooler has not been initialized") + raise RuntimeError("Temporal memory has not been initialized") if hasattr(self._tfdr, 'finishLearning'): self.resetSequenceStates() diff --git a/src/nupic/research/TP.py b/src/nupic/research/BacktrackingTM.py similarity index 97% rename from src/nupic/research/TP.py rename to src/nupic/research/BacktrackingTM.py index 6dc00ea547..48eb580693 100644 --- a/src/nupic/research/TP.py +++ b/src/nupic/research/BacktrackingTM.py @@ -19,9 +19,9 @@ # http://numenta.org/licenses/ # ---------------------------------------------------------------------- -""" @file TP.py +""" @file BacktrackingTM.py -Temporal pooler implementation. +Temporal memory implementation. This is the Python implementation and is used as the base class for the C++ implementation. @@ -41,17 +41,17 @@ # Default verbosity while running unit tests VERBOSITY = 0 -# The current TP version used to track the checkpoint state. -TP_VERSION = 1 +# The current TM version used to track the checkpoint state. +TM_VERSION = 1 # The numpy equivalent to the floating point type used by NTA dtype = GetNTAReal() -class TP(ConsolePrinterMixin): +class BacktrackingTM(ConsolePrinterMixin): """ - Class implementing the temporal pooler algorithm as described in the + Class implementing the temporal memory algorithm as described in the published Cortical Learning Algorithm documentation. The implementation here attempts to closely match the pseudocode in the documentation. This implementation does contain several additional bells and whistles such as @@ -90,7 +90,7 @@ def __init__(self, outputType='normal', ): """ - Construct the TP + Construct the TM @param pamLength Number of time steps to remain in "Pay Attention Mode" after we detect we've reached the end of a learned sequence. Setting @@ -111,13 +111,13 @@ def __init__(self, only do the global decay loop every maxAge iterations. The default (maxAge=1) reverts to the behavior where global decay is applied every iteration to every segment. Using maxAge > 1 - can significantly speed up the TP when global decay is used. + can significantly speed up the TM when global decay is used. @param maxSeqLength If not 0, we will never learn more than maxSeqLength inputs in a row without starting over at start cells. This sets an upper bound on the length of learned sequences and thus is another means (besides maxAge and globalDecay) by which to - limit how much the TP tries to learn. + limit how much the TM tries to learn. @param maxSegmentsPerCell The maximum number of segments allowed on a cell. This is used to turn on "fixed size CLA" mode. 
When in effect, @@ -150,7 +150,7 @@ def __init__(self, """ ## @todo document - self.version = TP_VERSION + self.version = TM_VERSION ConsolePrinterMixin.__init__(self, verbosity) @@ -163,7 +163,7 @@ def __init__(self, assert (globalDecay == 0.0) assert (maxAge == 0) - assert maxSynapsesPerSegment >= newSynapseCount, ("TP requires that " + assert maxSynapsesPerSegment >= newSynapseCount, ("TM requires that " "maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % ( maxSynapsesPerSegment, newSynapseCount)) @@ -261,7 +261,7 @@ def __init__(self, self.pamCounter = self.pamLength - ## If True, the TP will compute a signature for each sequence + ## If True, the TM will compute a signature for each sequence self.collectSequenceStats = False ## This gets set when we receive a reset and cleared on the first compute @@ -396,11 +396,11 @@ def __setstate__(self, state): self.setRandomState(state['_random']) del state['_random'] self.__dict__.update(state) - # Check the version of the checkpointed TP and update it to the current + # Check the version of the checkpointed TM and update it to the current # version if necessary. if not hasattr(self, 'version'): self._initEphemerals() - self.version = TP_VERSION + self.version = TM_VERSION def __getattr__(self, name): @@ -420,26 +420,26 @@ def __getattr__(self, name): we'll just return what it gives us. """ try: - return super(TP, self).__getattr__(name) + return super(BacktrackingTM, self).__getattr__(name) except AttributeError: - raise AttributeError("'TP' object has no attribute '%s'" % name) + raise AttributeError("'TM' object has no attribute '%s'" % name) def __del__(self): pass - def __ne__(self, tp): - return not self == tp + def __ne__(self, tm): + return not self == tm - def __eq__(self, tp): - return not self.diff(tp) + def __eq__(self, tm): + return not self.diff(tm) - def diff(self, tp): + def diff(self, tm): diff = [] - toCheck = [((), self.__getstate__(), tp.__getstate__())] + toCheck = [((), self.__getstate__(), tm.__getstate__())] while toCheck: keys, a, b = toCheck.pop() if type(a) != type(b): @@ -486,14 +486,14 @@ def getLearnActiveStateT(self): def saveToFile(self, filePath): """ - Implemented in TP10X2.TP10X2.saveToFile + Implemented in BacktrackingTMCPP.BacktrackingTMCPP.saveToFile """ pass def loadFromFile(self, filePath): """ - Implemented in TP10X2.TP10X2.loadFromFile + Implemented in BacktrackingTMCPP.BacktrackingTMCPP.loadFromFile """ pass @@ -874,7 +874,7 @@ def printInput(self, x): def printParameters(self): """ - Print the parameter settings for the TP. + Print the parameter settings for the TM. """ print "numberOfCols=", self.numberOfCols print "cellsPerColumn=", self.cellsPerColumn @@ -999,8 +999,8 @@ def printComputeEnd(self, output, learn=False): print elif self.verbosity >= 1: - print "TP: learn:", learn - print "TP: active outputs(%d):" % len(output.nonzero()[0]), + print "TM: learn:", learn + print "TM: active outputs(%d):" % len(output.nonzero()[0]), self.printActiveIndices(output.reshape(self.numberOfCols, self.cellsPerColumn)) @@ -1237,7 +1237,7 @@ def computeOutput(self): dtype='float32') # Turn on the most confident cell in each column. Note here that - # Columns refers to TP columns, even though each TP column is a row + # Columns refers to TM columns, even though each TM column is a row # in the numpy array. 
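Since only class and module names change, existing call sites survive a mechanical rename. A minimal usage sketch of the renamed class (Python 2, matching the codebase; sizes are illustrative):

```python
import numpy
from nupic.research.BacktrackingTM import BacktrackingTM

tm = BacktrackingTM(numberOfCols=64, cellsPerColumn=4, verbosity=0)

for _ in xrange(10):
    # Random binary column activity; the TM expects a float32 vector.
    x = numpy.random.randint(0, 2, 64).astype('float32')
    tm.compute(x, enableLearn=True, computeInfOutput=True)

# First two elements of getSegmentInfo() are segment and synapse counts,
# as used by the compatibility tests earlier in this patch.
numSegments, numSynapses = tm.getSegmentInfo()[0:2]
print(numSegments)
```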
numCols = self.currentOutput.shape[0] self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1 @@ -1263,7 +1263,7 @@ def computeOutput(self): def getActiveState(self): """ Return the current active state. This is called by the node to - obtain the sequence output of the TP. + obtain the sequence output of the TM. """ # TODO: This operation can be sped up by making activeState of # type 'float32' up front. @@ -1288,24 +1288,24 @@ def getPredictedState(self): def predict(self, nSteps): """ This function gives the future predictions for timesteps starting - from the current TP state. The TP is returned to its original state at the + from the current TM state. The TM is returned to its original state at the end before returning. - -# We save the TP state. + -# We save the TM state. -# Loop for nSteps -# Turn-on with lateral support from the current active cells -# Set the predicted cells as the next step's active cells. This step in learn and infer methods use input here to correct the predictions. We don't use any input here. - -# Revert back the TP state to the time before prediction + -# Revert back the TM state to the time before prediction @param nSteps The number of future time steps to be predicted @returns all the future predictions - a numpy array of type "float32" and shape (nSteps, numberOfCols). - The ith row gives the tp prediction for each column at + The ith row gives the tm prediction for each column at a future timestep (t+i+1). """ - # Save the TP dynamic state, we will use to revert back in the end + # Save the TM dynamic state, we will use to revert back in the end pristineTPDynamicState = self._getTPDynamicState() assert (nSteps>0) @@ -1350,11 +1350,11 @@ def predict(self, nSteps): def _getTPDynamicStateVariableNames(self): """ - Any newly added dynamic states in the TP should be added to this list. + Any newly added dynamic states in the TM should be added to this list. Parameters: -------------------------------------------- - retval: The list of names of TP dynamic state variables. + retval: The list of names of TM dynamic state variables. """ return ["infActiveState", "infPredictedState", @@ -1385,7 +1385,7 @@ def _setTPDynamicState(self, tpDynamicState): dict has all the dynamic state variable names as keys and their values at this instant as values. - We set the dynamic state variables in the tp object with these items. + We set the dynamic state variables in the tm object with these items. """ for variableName in self._getTPDynamicStateVariableNames(): self.__dict__[variableName] = tpDynamicState.pop(variableName) @@ -2391,7 +2391,7 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): predictedState, self.colConfidence['t-1']) - # Finally return the TP output + # Finally return the TM output output = self.computeOutput() # Print diagnostic information based on the current verbosity level @@ -2448,11 +2448,11 @@ def columnConfidences(self, cellConfidences=None): def topDownCompute(self, topDownIn=None): """ - Top-down compute - generate expected input given output of the TP + Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us - @returns best estimate of the TP input that would have generated bottomUpOut. + @returns best estimate of the TM input that would have generated bottomUpOut. 
""" # For now, we will assume there is no one above us and that bottomUpOut is # simply the output that corresponds to our currently stored column @@ -2604,18 +2604,18 @@ def checkPrediction2(self, patternNZs, output=None, colConfidence=None, This function produces goodness-of-match scores for a set of input patterns, by checking for their presence in the current and predicted output of the - TP. Returns a global count of the number of extra and missing bits, the + TM. Returns a global count of the number of extra and missing bits, the confidence scores for each input pattern, and (if requested) the - bits in each input pattern that were not present in the TP's prediction. + bits in each input pattern that were not present in the TM's prediction. @param patternNZs a list of input patterns that we want to check for. Each element is a list of the non-zeros in that pattern. - @param output The output of the TP. If not specified, then use the - TP's current output. This can be specified if you are + @param output The output of the TM. If not specified, then use the + TM's current output. This can be specified if you are trying to check the prediction metric for an output from the past. @param colConfidence The column confidences. If not specified, then use the - TP's current self.colConfidence. This can be specified if you + TM's current self.colConfidence. This can be specified if you are trying to check the prediction metrics for an output from the past. @param details if True, also include details of missing bits per pattern. @@ -2939,7 +2939,7 @@ def getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False): # that we will need to update. # - pairs represent source (colIdx, cellIdx) of new synapses to create on # the segment - update = TP.SegmentUpdate(c, i, s, activeSynapses) + update = BacktrackingTM.SegmentUpdate(c, i, s, activeSynapses) return update @@ -3142,7 +3142,7 @@ def adaptSegment(self, segUpdate): # (segID, sequenceSegment flag, frequency, positiveActivations, # totalActivations, lastActiveIteration) - newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment) + newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment) # numpy.float32 important so that we can match with C++ for synapse in activeSynapses: @@ -3159,7 +3159,7 @@ def adaptSegment(self, segUpdate): def getSegmentInfo(self, collectActiveData = False): """Returns information about the distribution of segments, synapses and - permanence values in the current TP. If requested, also returns information + permanence values in the current TM. If requested, also returns information regarding the number of currently active segments and synapses. @returns tuple described below: @@ -3259,13 +3259,13 @@ class Segment(object): 0.0000010] - def __init__(self, tp, isSequenceSeg): - self.tp = tp - self.segID = tp.segID - tp.segID += 1 + def __init__(self, tm, isSequenceSeg): + self.tm = tm + self.segID = tm.segID + tm.segID += 1 self.isSequenceSeg = isSequenceSeg - self.lastActiveIteration = tp.lrnIterationIdx + self.lastActiveIteration = tm.lrnIterationIdx self.positiveActivations = 1 self.totalActivations = 1 @@ -3273,8 +3273,8 @@ def __init__(self, tp, isSequenceSeg): # These are internal variables used to compute the positive activations # duty cycle. 
# Callers should use dutyCycle() - self._lastPosDutyCycle = 1.0 / tp.lrnIterationIdx - self._lastPosDutyCycleIteration = tp.lrnIterationIdx + self._lastPosDutyCycle = 1.0 / tm.lrnIterationIdx + self._lastPosDutyCycleIteration = tm.lrnIterationIdx # Each synapse is a tuple (srcCellCol, srcCellIdx, permanence) self.syns = [] @@ -3290,7 +3290,7 @@ def __eq__(self, s): if set(d1) != set(d2): return False for k, v in d1.iteritems(): - if k in ('tp',): + if k in ('tm',): continue elif v != d2[k]: return False @@ -3336,16 +3336,16 @@ def dutyCycle(self, active=False, readOnly=False): @ref dutyCycleTiers. """ # For tier #0, compute it from total number of positive activations seen - if self.tp.lrnIterationIdx <= self.dutyCycleTiers[1]: + if self.tm.lrnIterationIdx <= self.dutyCycleTiers[1]: dutyCycle = float(self.positiveActivations) \ - / self.tp.lrnIterationIdx + / self.tm.lrnIterationIdx if not readOnly: - self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx + self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx self._lastPosDutyCycle = dutyCycle return dutyCycle # How old is our update? - age = self.tp.lrnIterationIdx - self._lastPosDutyCycleIteration + age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration # If it's already up to date, we can returned our cached value. if age == 0 and not active: @@ -3353,7 +3353,7 @@ def dutyCycle(self, active=False, readOnly=False): # Figure out which alpha we're using for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1): - if self.tp.lrnIterationIdx > self.dutyCycleTiers[tierIdx]: + if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]: alpha = self.dutyCycleAlphas[tierIdx] break @@ -3364,7 +3364,7 @@ def dutyCycle(self, active=False, readOnly=False): # Update cached values if not read-only if not readOnly: - self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx + self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx self._lastPosDutyCycle = dutyCycle return dutyCycle @@ -3403,7 +3403,7 @@ def debugPrint(self): self.totalActivations), # Age - print "%4d" % (self.tp.lrnIterationIdx - self.lastActiveIteration), + print "%4d" % (self.tm.lrnIterationIdx - self.lastActiveIteration), # Print each synapses on this segment as: srcCellCol/srcCellIdx/perm # if the permanence is above connected, put [] around the synapse info @@ -3489,7 +3489,7 @@ def addSynapse(self, srcCellCol, srcCellIdx, perm): def updateSynapses(self, synapses, delta): """Update a set of synapses in the segment. - @param tp The owner TP + @param tm The owner TM @param synapses List of synapse indices to update @param delta How much to add to each permanence @@ -3502,8 +3502,8 @@ def updateSynapses(self, synapses, delta): self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta # Cap synapse permanence at permanenceMax - if newValue > self.tp.permanenceMax: - self.syns[synapse][2] = self.tp.permanenceMax + if newValue > self.tm.permanenceMax: + self.syns[synapse][2] = self.tm.permanenceMax else: for synapse in synapses: @@ -3521,4 +3521,4 @@ def updateSynapses(self, synapses, delta): # This is necessary for unpickling objects that have instances of the nested # class since the loading process looks for the class at the top level of the # module. 
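The tiered duty-cycle logic above is easiest to read as one exponential-moving-average step whose alpha grows coarser with the segment's age. A structural sketch (the actual constants live in dutyCycleTiers and dutyCycleAlphas; this is not the exact code):

```python
def updateDutyCycle(dutyCycle, age, active, alpha):
    """One EMA step over an interval of `age` iterations.

    Mirrors the shape of Segment.dutyCycle() above: decay the cached value
    for the elapsed interval, then credit the current activation if any.
    """
    dutyCycle *= (1.0 - alpha) ** age
    if active:
        dutyCycle += alpha
    return dutyCycle
```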
-SegmentUpdate = TP.SegmentUpdate +SegmentUpdate = BacktrackingTM.SegmentUpdate diff --git a/src/nupic/research/TP10X2.py b/src/nupic/research/BacktrackingTMCPP.py similarity index 92% rename from src/nupic/research/TP10X2.py rename to src/nupic/research/BacktrackingTMCPP.py index 0d36155ed3..0c18dd2de8 100644 --- a/src/nupic/research/TP10X2.py +++ b/src/nupic/research/BacktrackingTMCPP.py @@ -23,7 +23,7 @@ from numpy import * import nupic.math -from nupic.research.TP import TP +from nupic.research.BacktrackingTM import BacktrackingTM from nupic.bindings.algorithms import Cells4 @@ -60,8 +60,8 @@ def _extractCallingMethodArgs(): -class TP10X2(TP): - """Class implementing the temporal pooler algorithm as described in the +class BacktrackingTMCPP(BacktrackingTM): + """Class implementing the temporal memory algorithm as described in the published Cortical Learning Algorithm documentation. The implementation here attempts to closely match the pseudocode in the documentation. This implementation does contain several additional bells and whistles such as @@ -69,7 +69,7 @@ class TP10X2(TP): """ - # We use the same keyword arguments as TP() + # We use the same keyword arguments as TM() def __init__(self, numberOfCols = 500, cellsPerColumn = 10, @@ -115,46 +115,46 @@ def __init__(self, self.checkSynapseConsistency = checkSynapseConsistency # If set to False, Cells4 will *not* be treated as an ephemeral member - # and full TP10X pickling is possible. This is useful for testing + # and full BacktrackingTMCPP pickling is possible. This is useful for testing # pickle/unpickle without saving Cells4 to an external file self.makeCells4Ephemeral = True #--------------------------------------------------------------------------------- # Init the base class - TP.__init__(self, - numberOfCols = numberOfCols, - cellsPerColumn = cellsPerColumn, - initialPerm = initialPerm, - connectedPerm = connectedPerm, - minThreshold = minThreshold, - newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, - permanenceDec = permanenceDec, - permanenceMax = permanenceMax, # never exceed this value + BacktrackingTM.__init__(self, + numberOfCols = numberOfCols, + cellsPerColumn = cellsPerColumn, + initialPerm = initialPerm, + connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + permanenceMax = permanenceMax, # never exceed this value globalDecay = globalDecay, - activationThreshold = activationThreshold, - doPooling = doPooling, - segUpdateValidDuration = segUpdateValidDuration, - burnIn = burnIn, - collectStats = collectStats, - seed = seed, - verbosity = verbosity, - pamLength = pamLength, - maxInfBacktrack = maxInfBacktrack, - maxLrnBacktrack = maxLrnBacktrack, - maxAge = maxAge, - maxSeqLength = maxSeqLength, - maxSegmentsPerCell = maxSegmentsPerCell, - maxSynapsesPerSegment = maxSynapsesPerSegment, - outputType = outputType, - ) + activationThreshold = activationThreshold, + doPooling = doPooling, + segUpdateValidDuration = segUpdateValidDuration, + burnIn = burnIn, + collectStats = collectStats, + seed = seed, + verbosity = verbosity, + pamLength = pamLength, + maxInfBacktrack = maxInfBacktrack, + maxLrnBacktrack = maxLrnBacktrack, + maxAge = maxAge, + maxSeqLength = maxSeqLength, + maxSegmentsPerCell = maxSegmentsPerCell, + maxSynapsesPerSegment = maxSynapsesPerSegment, + outputType = outputType, + ) def __setstate__(self, state): """ Set the state of ourself from a serialized state. 
""" - super(TP10X2, self).__setstate__(state) + super(BacktrackingTMCPP, self).__setstate__(state) if self.makeCells4Ephemeral: self.cells4 = Cells4(self.numberOfCols, self.cellsPerColumn, @@ -190,7 +190,7 @@ def _getEphemeralMembers(self): """ List of our member variables that we don't need to be saved """ - e = TP._getEphemeralMembers(self) + e = BacktrackingTM._getEphemeralMembers(self) if self.makeCells4Ephemeral: e.extend(['cells4']) return e @@ -200,7 +200,7 @@ def _initEphemerals(self): """ Initialize all ephemeral members after being restored to a pickled state. """ - TP._initEphemerals(self) + BacktrackingTM._initEphemerals(self) #--------------------------------------------------------------------------------- # cells4 specific initialization @@ -273,9 +273,9 @@ def __getattr__(self, name): """ try: - return super(TP, self).__getattr__(name) + return super(BacktrackingTM, self).__getattr__(name) except AttributeError: - raise AttributeError("'TP' object has no attribute '%s'" % name) + raise AttributeError("'TM' object has no attribute '%s'" % name) def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): @@ -285,7 +285,7 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): slows things down, but you can override this by passing in True for computeInfOutput """ - # The C++ TP takes 32 bit floats as input. uint32 works as well since the + # The C++ TM takes 32 bit floats as input. uint32 works as well since the # code only checks whether elements are non-zero assert (bottomUpInput.dtype == numpy.dtype('float32')) or \ (bottomUpInput.dtype == numpy.dtype('uint32')) or \ @@ -330,7 +330,7 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): - # Finally return the TP output + # Finally return the TM output output = self.computeOutput() # Print diagnostic information based on the current verbosity level @@ -400,10 +400,10 @@ def reset(self): are reset to 0. """ if self.verbosity >= 3: - print "TP Reset" + print "TM Reset" self._setStatePointers() self.cells4.reset() - TP.reset(self) + BacktrackingTM.reset(self) def finishLearning(self): @@ -640,7 +640,7 @@ def getNumSegmentsInCell(self, c, i): def getSegmentInfo(self, collectActiveData = False): """Returns information about the distribution of segments, synapses and - permanence values in the current TP. If requested, also returns information + permanence values in the current TM. If requested, also returns information regarding the number of currently active segments and synapses. The method returns the following tuple: diff --git a/src/nupic/research/TP_shim.py b/src/nupic/research/TM_shim.py similarity index 88% rename from src/nupic/research/TP_shim.py rename to src/nupic/research/TM_shim.py index 67cb39a82c..4b0ac5e6ae 100644 --- a/src/nupic/research/TP_shim.py +++ b/src/nupic/research/TM_shim.py @@ -20,7 +20,7 @@ # ---------------------------------------------------------------------- """ -A shim for the TP class that transparently implements TemporalMemory, +A shim for the TM class that transparently implements TemporalMemory, for use with OPF. """ @@ -38,9 +38,9 @@ class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, -class TPShimMixin(object): +class TMShimMixin(object): """ - TP => Temporal Memory shim class. + TM => Temporal Memory shim class. """ def __init__(self, numberOfCols=500, @@ -63,9 +63,9 @@ def __init__(self, outputType="normal", seed=42): """ - Translate parameters and initialize member variables specific to `TP.py`. 
+ Translate parameters and initialize member variables specific to `BacktrackingTM.py`. """ - super(TPShimMixin, self).__init__( + super(TMShimMixin, self).__init__( columnDimensions=(numberOfCols,), cellsPerColumn=cellsPerColumn, activationThreshold=activationThreshold, @@ -85,7 +85,7 @@ def __init__(self, def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): """ - (From `TP.py`) + (From `BacktrackingTM.py`) Handle one compute, possibly learning. @param bottomUpInput The bottom-up input, typically from a spatial pooler @@ -95,8 +95,8 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): If true, compute the inference output If false, do not compute the inference output """ - super(TPShimMixin, self).compute(set(bottomUpInput.nonzero()[0]), - learn=enableLearn) + super(TMShimMixin, self).compute(set(bottomUpInput.nonzero()[0]), + learn=enableLearn) numberOfCells = self.numberOfCells() activeState = numpy.zeros(numberOfCells) @@ -112,12 +112,12 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): def topDownCompute(self, topDownIn=None): """ - (From `TP.py`) - Top-down compute - generate expected input given output of the TP + (From `BacktrackingTM.py`) + Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us - @returns best estimate of the TP input that would have generated bottomUpOut. + @returns best estimate of the TM input that would have generated bottomUpOut. """ output = numpy.zeros(self.numberOfColumns()) columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()] @@ -143,19 +143,19 @@ def getLearnActiveStateT(self): -class TPShim(TPShimMixin, TemporalMemory): +class TMShim(TMShimMixin, TemporalMemory): pass -class TPCPPShim(TPShimMixin, TemporalMemoryCPP): +class TMCPPShim(TMShimMixin, TemporalMemoryCPP): pass -class MonitoredTPShim(MonitoredTemporalMemory): +class MonitoredTMShim(MonitoredTemporalMemory): """ - TP => Monitored Temporal Memory shim class. + TM => Monitored Temporal Memory shim class. TODO: This class is not very DRY. This whole file needs to be replaced by a pure TemporalMemory region @@ -182,9 +182,9 @@ def __init__(self, outputType="normal", seed=42): """ - Translate parameters and initialize member variables specific to `TP.py`. + Translate parameters and initialize member variables specific to `BacktrackingTM.py`. """ - super(MonitoredTPShim, self).__init__( + super(MonitoredTMShim, self).__init__( columnDimensions=(numberOfCols,), cellsPerColumn=cellsPerColumn, activationThreshold=activationThreshold, @@ -204,7 +204,7 @@ def __init__(self, def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): """ - (From `TP.py`) + (From `BacktrackingTM.py`) Handle one compute, possibly learning. 
@param bottomUpInput The bottom-up input, typically from a spatial pooler @@ -214,8 +214,8 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): If true, compute the inference output If false, do not compute the inference output """ - super(MonitoredTPShim, self).compute(set(bottomUpInput.nonzero()[0]), - learn=enableLearn) + super(MonitoredTMShim, self).compute(set(bottomUpInput.nonzero()[0]), + learn=enableLearn) numberOfCells = self.numberOfCells() activeState = numpy.zeros(numberOfCells) @@ -229,12 +229,12 @@ def compute(self, bottomUpInput, enableLearn, computeInfOutput=None): def topDownCompute(self, topDownIn=None): """ - (From `TP.py`) - Top-down compute - generate expected input given output of the TP + (From `BacktrackingTM.py`) + Top-down compute - generate expected input given output of the TM @param topDownIn top down input from the level above us - @returns best estimate of the TP input that would have generated bottomUpOut. + @returns best estimate of the TM input that would have generated bottomUpOut. """ output = numpy.zeros(self.numberOfColumns()) columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()] diff --git a/src/nupic/research/fdrutilities.py b/src/nupic/research/fdrutilities.py index a9f4828819..185feab447 100644 --- a/src/nupic/research/fdrutilities.py +++ b/src/nupic/research/fdrutilities.py @@ -322,10 +322,10 @@ def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7], patternLen=500, patternActivity=50): """ Generate the simulated output from a spatial pooler that's sitting - on top of another spatial pooler / temporal pooler pair. The average on-time - of the outputs from the simulated TP is given by the l1Pooling argument. + on top of another spatial pooler / temporal memory pair. The average on-time + of the outputs from the simulated TM is given by the l1Pooling argument. - In this routine, L1 refers to the first spatial and temporal pooler and L2 + In this routine, L1 refers to the first spatial and temporal memory and L2 refers to the spatial pooler above that. Parameters: @@ -341,7 +341,7 @@ def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7], perfectStability: If true, then the input patterns represented by the sequences generated will have perfect stability over l1Pooling time steps. This is the best case ideal input - to a TP. In actual situations, with an actual SP + to a TM. In actual situations, with an actual SP providing input, the stability will always be less than this. spHystereisFactor: The hysteresisFactor to use in the L2 spatial pooler. @@ -440,10 +440,10 @@ def vectorsFromSeqList(seqList, patternMatrix): ############################################################################### # The following three functions are used in tests to compare two different -# TP instances. +# TM instances. 
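As a migration aid for the comparison trio introduced here, a minimal usage sketch. This is hedged: it assumes a NuPIC checkout at this commit, and that two freshly constructed implementations start from identical state; the constructor keywords are the ones the tests later in this diff use (Python 2, matching the codebase):

```python
from nupic.research.BacktrackingTM import BacktrackingTM
from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP
from nupic.research import fdrutilities as fdrutils

kwargs = dict(numberOfCols=100, cellsPerColumn=4, seed=42, verbosity=0)
pyTm = BacktrackingTM(**kwargs)
cppTm = BacktrackingTMCPP(**kwargs)
cppTm.retrieveLearningStates = True  # lets tmDiff2 compare learn states too

# sameTMParams() compares constructor-level parameters; tmDiff2() (defined
# below) additionally walks states, segment counts and, optionally, individual
# synapses, ignoring segment order between the Python and C++ implementations.
assert fdrutils.sameTMParams(pyTm, cppTm)
assert fdrutils.tmDiff2(pyTm, cppTm, verbosity=0, relaxSegmentTests=True)
```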
-def sameTPParams(tp1, tp2): - """Given two TP instances, see if any parameters are different.""" +def sameTMParams(tp1, tp2): + """Given two TM instances, see if any parameters are different.""" result = True for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm", "minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec", @@ -496,20 +496,20 @@ def sameSegment(seg1, seg2): return result -def tpDiff(tp1, tp2, verbosity = 0, relaxSegmentTests =True): +def tmDiff(tm1, tm2, verbosity = 0, relaxSegmentTests =True): """ - Given two TP instances, list the difference between them and returns False + Given two TM instances, list the difference between them and returns False if there is a difference. This function checks the major parameters. If this passes (and checkLearn is true) it checks the number of segments on each cell. If this passes, checks each synapse on each segment. When comparing C++ and Py, the segments are usually in different orders in the - cells. tpDiff ignores segment order when comparing TP's. + cells. tmDiff ignores segment order when comparing TM's. """ # First check basic parameters. If we fail here, don't continue - if sameTPParams(tp1, tp2) == False: - print "Two TP's have different parameters" + if sameTMParams(tm1, tm2) == False: + print "Two TM's have different parameters" return False result = True @@ -517,76 +517,76 @@ def tpDiff(tp1, tp2, verbosity = 0, relaxSegmentTests =True): # Compare states at t first, they usually diverge before the structure of the # cells starts diverging - if (tp1.activeState['t'] != tp2.activeState['t']).any(): - print 'Active states diverge', numpy.where(tp1.activeState['t'] != tp2.activeState['t']) + if (tm1.activeState['t'] != tm2.activeState['t']).any(): + print 'Active states diverge', numpy.where(tm1.activeState['t'] != tm2.activeState['t']) result = False - if (tp1.predictedState['t'] - tp2.predictedState['t']).any(): - print 'Predicted states diverge', numpy.where(tp1.predictedState['t'] != tp2.predictedState['t']) + if (tm1.predictedState['t'] - tm2.predictedState['t']).any(): + print 'Predicted states diverge', numpy.where(tm1.predictedState['t'] != tm2.predictedState['t']) result = False # TODO: check confidence at T (confT) # Now check some high level learned parameters. 
- if tp1.getNumSegments() != tp2.getNumSegments(): - print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments() + if tm1.getNumSegments() != tm2.getNumSegments(): + print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments() result = False - if tp1.getNumSynapses() != tp2.getNumSynapses(): - print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses() - tp1.printCells() - tp2.printCells() + if tm1.getNumSynapses() != tm2.getNumSynapses(): + print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses() + tm1.printCells() + tm2.printCells() result = False # Check that each cell has the same number of segments and synapses - for c in xrange(tp1.numberOfCols): - for i in xrange(tp2.cellsPerColumn): - if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i): + for c in xrange(tm1.numberOfCols): + for i in xrange(tm2.cellsPerColumn): + if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i): print "Num segments different in cell:",c,i, - print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i) + print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i) result = False # If the above tests pass, then check each segment and report differences - # Note that segments in tp1 can be in a different order than tp2. Here we - # make sure that, for each segment in tp1, there is an identical segment - # in tp2. + # Note that segments in tm1 can be in a different order than tm2. Here we + # make sure that, for each segment in tm1, there is an identical segment + # in tm2. if result == True and not relaxSegmentTests: - for c in xrange(tp1.numberOfCols): - for i in xrange(tp2.cellsPerColumn): - nSegs = tp1.getNumSegmentsInCell(c, i) + for c in xrange(tm1.numberOfCols): + for i in xrange(tm2.cellsPerColumn): + nSegs = tm1.getNumSegmentsInCell(c, i) for segIdx in xrange(nSegs): - tp1seg = tp1.getSegmentOnCell(c, i, segIdx) + tm1seg = tm1.getSegmentOnCell(c, i, segIdx) - # Loop through all segments in tp2seg and see if any of them match tp1seg + # Loop through all segments in tm2seg and see if any of them match tm1seg res = False - for tp2segIdx in xrange(nSegs): - tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx) - if sameSegment(tp1seg, tp2seg) == True: + for tm2segIdx in xrange(nSegs): + tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx) + if sameSegment(tm1seg, tm2seg) == True: res = True break if res == False: print "\nSegments are different for cell:",c,i if verbosity >= 1: print "C++" - tp1.printCell(c,i) + tm1.printCell(c, i) print "Py" - tp2.printCell(c,i) + tm2.printCell(c, i) result = False if result == True and (verbosity > 1): - print "TP's match" + print "TM's match" return result -def tpDiff2(tp1, tp2, verbosity = 0, relaxSegmentTests =True, +def tmDiff2(tm1, tm2, verbosity = 0, relaxSegmentTests =True, checkLearn = True, checkStates = True): """ - Given two TP instances, list the difference between them and returns False + Given two TM instances, list the difference between them and returns False if there is a difference. This function checks the major parameters. If this passes (and checkLearn is true) it checks the number of segments on each cell. If this passes, checks each synapse on each segment. When comparing C++ and Py, the segments are usually in different orders in the - cells. tpDiff ignores segment order when comparing TP's. + cells. tmDiff ignores segment order when comparing TM's. 
If checkLearn is True, will check learn states as well as all the segments

@@ -595,12 +595,12 @@ def tpDiff2(tp1, tp2, verbosity = 0, relaxSegmentTests =True,
   """
   # First check basic parameters. If we fail here, don't continue
-  if sameTPParams(tp1, tp2) == False:
-    print "Two TP's have different parameters"
+  if sameTMParams(tm1, tm2) == False:
+    print "Two TM's have different parameters"
     return False

-  tp1Label = "<%s>" % tp1.__class__.__name__
-  tp2Label = "<%s>" % tp2.__class__.__name__
+  tm1Label = "<%s>" % tm1.__class__.__name__
+  tm2Label = "<%s>" % tm2.__class__.__name__

   result = True

@@ -608,80 +608,80 @@ def tpDiff2(tp1, tp2, verbosity = 0, relaxSegmentTests =True,

   # Compare states at t first, they usually diverge before the structure of the
   # cells starts diverging
-  if (tp1.infActiveState['t'] != tp2.infActiveState['t']).any():
-    print 'Active states diverged', numpy.where(tp1.infActiveState['t'] != tp2.infActiveState['t'])
+  if (tm1.infActiveState['t'] != tm2.infActiveState['t']).any():
+    print 'Active states diverged', numpy.where(tm1.infActiveState['t'] != tm2.infActiveState['t'])
     result = False

-  if (tp1.infPredictedState['t'] - tp2.infPredictedState['t']).any():
-    print 'Predicted states diverged', numpy.where(tp1.infPredictedState['t'] != tp2.infPredictedState['t'])
+  if (tm1.infPredictedState['t'] - tm2.infPredictedState['t']).any():
+    print 'Predicted states diverged', numpy.where(tm1.infPredictedState['t'] != tm2.infPredictedState['t'])
     result = False

-  if checkLearn and (tp1.lrnActiveState['t'] - tp2.lrnActiveState['t']).any():
-    print 'lrnActiveState[t] diverged', numpy.where(tp1.lrnActiveState['t'] != tp2.lrnActiveState['t'])
+  if checkLearn and (tm1.lrnActiveState['t'] - tm2.lrnActiveState['t']).any():
+    print 'lrnActiveState[t] diverged', numpy.where(tm1.lrnActiveState['t'] != tm2.lrnActiveState['t'])
     result = False

-  if checkLearn and (tp1.lrnPredictedState['t'] - tp2.lrnPredictedState['t']).any():
-    print 'lrnPredictedState[t] diverged', numpy.where(tp1.lrnPredictedState['t'] != tp2.lrnPredictedState['t'])
+  if checkLearn and (tm1.lrnPredictedState['t'] - tm2.lrnPredictedState['t']).any():
+    print 'lrnPredictedState[t] diverged', numpy.where(tm1.lrnPredictedState['t'] != tm2.lrnPredictedState['t'])
     result = False

-  if checkLearn and abs(tp1.getAvgLearnedSeqLength() - tp2.getAvgLearnedSeqLength()) > 0.01:
+  if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > 0.01:
     print "Average learned sequence lengths differ: ",
-    print tp1.getAvgLearnedSeqLength()," vs ", tp2.getAvgLearnedSeqLength()
+    print tm1.getAvgLearnedSeqLength(), " vs ", tm2.getAvgLearnedSeqLength()
     result = False

   # TODO: check confidence at T (confT)

   # Now check some high level learned parameters.
-  if tp1.getNumSegments() != tp2.getNumSegments():
-    print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments()
+  if tm1.getNumSegments() != tm2.getNumSegments():
+    print "Number of segments are different", tm1.getNumSegments(), tm2.getNumSegments()
     result = False

-  if tp1.getNumSynapses() != tp2.getNumSynapses():
-    print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses()
+  if tm1.getNumSynapses() != tm2.getNumSynapses():
+    print "Number of synapses are different", tm1.getNumSynapses(), tm2.getNumSynapses()
     if verbosity >= 3:
-      print "%s: " % tp1Label,
-      tp1.printCells()
-      print "\n%s : " % tp2Label,
-      tp2.printCells()
+      print "%s: " % tm1Label,
+      tm1.printCells()
+      print "\n%s : " % tm2Label,
+      tm2.printCells()
     #result = False

   # Check that each cell has the same number of segments and synapses
-  for c in xrange(tp1.numberOfCols):
-    for i in xrange(tp2.cellsPerColumn):
-      if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i):
+  for c in xrange(tm1.numberOfCols):
+    for i in xrange(tm2.cellsPerColumn):
+      if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):
         print "Num segments different in cell:",c,i,
-        print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i)
+        print tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i)
         result = False

   # If the above tests pass, then check each segment and report differences
-  # Note that segments in tp1 can be in a different order than tp2. Here we
-  # make sure that, for each segment in tp1, there is an identical segment
-  # in tp2.
+  # Note that segments in tm1 can be in a different order than tm2. Here we
+  # make sure that, for each segment in tm1, there is an identical segment
+  # in tm2.
   if result == True and not relaxSegmentTests and checkLearn:
-    for c in xrange(tp1.numberOfCols):
-      for i in xrange(tp2.cellsPerColumn):
-        nSegs = tp1.getNumSegmentsInCell(c, i)
+    for c in xrange(tm1.numberOfCols):
+      for i in xrange(tm2.cellsPerColumn):
+        nSegs = tm1.getNumSegmentsInCell(c, i)
         for segIdx in xrange(nSegs):
-          tp1seg = tp1.getSegmentOnCell(c, i, segIdx)
+          tm1seg = tm1.getSegmentOnCell(c, i, segIdx)

-          # Loop through all segments in tp2seg and see if any of them match tp1seg
+          # Loop through all segments in tm2seg and see if any of them match tm1seg
           res = False
-          for tp2segIdx in xrange(nSegs):
-            tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx)
-            if sameSegment(tp1seg, tp2seg) == True:
+          for tm2segIdx in xrange(nSegs):
+            tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)
+            if sameSegment(tm1seg, tm2seg) == True:
               res = True
               break
           if res == False:
             print "\nSegments are different for cell:",c,i
             result = False
             if verbosity >= 0:
-              print "%s : " % tp1Label,
-              tp1.printCell(c,i)
-              print "\n%s : " % tp2Label,
-              tp2.printCell(c,i)
+              print "%s : " % tm1Label,
+              tm1.printCell(c, i)
+              print "\n%s : " % tm2Label,
+              tm2.printCell(c, i)

   if result == True and (verbosity > 1):
-    print "TP's match"
+    print "TM's match"

   return result

@@ -955,7 +955,7 @@ def averageOnTimePerTimestep(vectors, numSamples=None):

   This metric is resilient to the number of outputs that are on at each time
   step. That is, if time step 0 has many more outputs on than time step 100,
   it won't skew the results. This is particularly useful when measuring the
-  average on-time of things like the temporal pooler output where you might
+  average on-time of things like the temporal memory output where you might
   have many columns bursting at the start of a sequence - you don't want those
   start of sequence bursts to over-influence the calculated average on-time.

@@ -1064,7 +1064,7 @@ def plotOutputsOverTime(vectors, buVectors=None, title='On-times'):
   ------------------------------------------------------------
   vectors:        the vectors to plot
   buVectors:      These are normally specified when plotting the pooling
-                  outputs of the temporal pooler over time. The 'buVectors'
+                  outputs of the temporal memory over time. The 'buVectors'
                   are the sequence outputs and the 'vectors' are the pooling
                   outputs. The buVector (sequence) outputs will be drawn
                   in a darker color than the vector (pooling) outputs to

@@ -1310,13 +1310,13 @@ def checkMatch(input, prediction, sparse=True, verbosity=0):

 def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0):
   """
-  Computes the predictive ability of a temporal pooler (TP). This routine returns
+  Computes the predictive ability of a temporal memory (TM). This routine returns
   a value which is the average number of time steps of prediction provided
-  by the TP. It accepts as input the inputs, outputs, and resets provided to
-  the TP as well as a 'minOverlapPct' used to evaluate whether or not a
+  by the TM. It accepts as input the inputs, outputs, and resets provided to
+  the TM as well as a 'minOverlapPct' used to evaluate whether or not a
   prediction is a good enough match to the actual input.

-  The 'outputs' are the pooling outputs of the TP. This routine treats each output
+  The 'outputs' are the pooling outputs of the TM. This routine treats each output
   as a "manifold" that includes the active columns that should be present in the
   next N inputs. It then looks at each successive input and sees if its active
   columns are within the manifold. For each output sample, it computes how

@@ -1326,11 +1326,11 @@ def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0):

   Parameters:
   -----------------------------------------------
-  inputs:          The inputs to the TP. Row 0 contains the inputs from time
+  inputs:          The inputs to the TM. Row 0 contains the inputs from time
                    step 0, row 1 from time step 1, etc.
-  resets:          The reset input to the TP. Element 0 contains the reset from
+  resets:          The reset input to the TM. Element 0 contains the reset from
                    time step 0, element 1 from time step 1, etc.
-  outputs:         The pooling outputs from the TP. Row 0 contains the outputs
+  outputs:         The pooling outputs from the TM. Row 0 contains the outputs
                    from time step 0, row 1 from time step 1, etc.
   minOverlapPct:   How much each input's columns must overlap with the pooling
                    output's columns to be considered a valid prediction.

diff --git a/src/nupic/research/temporal_memory_shim.py b/src/nupic/research/temporal_memory_shim.py
index d369fd54d4..80d898d0be 100644
--- a/src/nupic/research/temporal_memory_shim.py
+++ b/src/nupic/research/temporal_memory_shim.py
@@ -20,27 +20,27 @@
 # ----------------------------------------------------------------------

 """
-A shim for the TemporalMemory class that transparently implements TP,
+A shim for the TemporalMemory class that transparently implements TM,
 for use with tests.
""" import numpy from nupic.math import GetNTAReal -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research.connections import Connections -TPClass = TP10X2 +TMClass = BacktrackingTMCPP dtype = GetNTAReal() -class TemporalMemoryShim(TPClass): +class TemporalMemoryShim(TMClass): """ - Temporal Memory => TP shim class. + Temporal Memory => TM shim class. """ def __init__(self, columnDimensions=(2048,), diff --git a/src/nupic/support/nupic-default.xml b/src/nupic/support/nupic-default.xml index e93c0d3e58..1229567764 100644 --- a/src/nupic/support/nupic-default.xml +++ b/src/nupic/support/nupic-default.xml @@ -65,7 +65,7 @@ Vector type used to classify anomalies. Types: tpc - classify on active cells in the TP sp_tpe - classify on the active SP columns concatenated with the predicted - TP columns that did not become active + TM columns that did not become active diff --git a/src/nupic/swarming/exp_generator/ExpGenerator.py b/src/nupic/swarming/exp_generator/ExpGenerator.py index 0647b8f094..b8702c77b3 100755 --- a/src/nupic/swarming/exp_generator/ExpGenerator.py +++ b/src/nupic/swarming/exp_generator/ExpGenerator.py @@ -1375,7 +1375,7 @@ def _generateExperiment(options, outputDirPath, hsVersion, tokenReplacements['\$PERM_SP_CHOICES'] = \ _ONE_INDENT +"'synPermInactiveDec': PermuteFloat(0.0003, 0.1),\n" - # The TP permutation parameters are not required for non-temporal networks + # The TM permutation parameters are not required for non-temporal networks if options['inferenceType'] in ['NontemporalMultiStep', 'NontemporalClassification']: tokenReplacements['\$PERM_TP_CHOICES'] = "" diff --git a/src/nupic/swarming/exp_generator/claDescriptionTemplate.tpl b/src/nupic/swarming/exp_generator/claDescriptionTemplate.tpl index 388a062bdd..607d3b6c78 100644 --- a/src/nupic/swarming/exp_generator/claDescriptionTemplate.tpl +++ b/src/nupic/swarming/exp_generator/claDescriptionTemplate.tpl @@ -115,7 +115,7 @@ config = { 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -140,7 +140,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': $SP_PERM_CONNECTED, @@ -152,20 +152,21 @@ config = { 'boostStrength': 0.0 }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : $TP_ENABLE, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py + # and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -176,7 +177,7 @@ config = { 'seed': 1960, - # Temporal Pooler implementation selector (see _getTPClass in + # Temporal Pooler implementation selector (see _getTMClass in # CLARegion.py). 'temporalImp': 'cpp', @@ -220,7 +221,7 @@ config = { 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/src/nupic/swarming/exp_generator/descriptionTemplate.tpl b/src/nupic/swarming/exp_generator/descriptionTemplate.tpl index 8ddeb27d61..3d47290cc4 100644 --- a/src/nupic/swarming/exp_generator/descriptionTemplate.tpl +++ b/src/nupic/swarming/exp_generator/descriptionTemplate.tpl @@ -226,7 +226,7 @@ config = { # General CLA Region Parameters ############################################################################## - # Number of cell columns in the cortical region (same number for SP and TP) + # Number of cell columns in the cortical region (same number for SP and TM) # (see also tpNCellsPerCol) # Replaces: spCoincCount 'claRegionNColumns' : 2048, @@ -263,7 +263,7 @@ config = { # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'spSynPermConnected' : $SP_PERM_CONNECTED, @@ -271,12 +271,12 @@ config = { ############################################################################## - # Temporal Pooler (TP) Parameters + # Temporal Memory (TM) Parameters ############################################################################## - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) # 'tpVerbosity' : 0, @@ -288,8 +288,8 @@ config = { # by LPF; solve in OPF. 'tpTrainPrintStatsPeriodIter' : 0, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting the next + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting the next # inputs. Without TP, the model is only capable of reconstructing missing sensor # inputs (via SP). 
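For orientation, roughly what the `tmParams` fragment above renders to once the swarm substitutes the `$...` tokens. All values here are illustrative placeholders (only `columnCount`, `seed`, `temporalImp`, and `outputType` appear verbatim in the template excerpts), not defaults:

```python
config = {
  'tmEnable': True,           # TM on: temporal predictions available
  'tmParams': {
    'verbosity': 0,           # 0 silent; [1..6] increasing verbosity
    'columnCount': 2048,      # same column count as the SP
    'seed': 1960,
    'temporalImp': 'cpp',     # selects the C++ TM (see _getTMClass in CLARegion.py)
    'outputType': 'normal',
    'pamLength': 3,           # "Pay Attention Mode" length; illustrative value
  },
}
```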
# @@ -321,7 +321,7 @@ config = { # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO for Ron: once the appropriate value is placed in TP constructor, see if + # TODO for Ron: once the appropriate value is placed in TM constructor, see if # we should eliminate this parameter from description.py # 'tpMaxSegmentsPerCell' : 128, @@ -343,7 +343,7 @@ config = { # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO for Ron: once the appropriate value is placed in TP constructor, see if + # TODO for Ron: once the appropriate value is placed in TM constructor, see if # we should eliminate this parameter from description.py # 'tpMaxSynapsesPerSegment' : 32, @@ -355,7 +355,7 @@ config = { # 'tpNewSynapseCount' : 20, - # "Pay Attention Mode" length. This tells the TP how many new elements + # "Pay Attention Mode" length. This tells the TM how many new elements # to append to the end of a learned sequence at a time. Smaller values are # better for datasets with short sequences, higher values are better for # datasets with long sequences. diff --git a/src/nupic/swarming/experimentutils.py b/src/nupic/swarming/experimentutils.py index cfd7fd3cc5..1c2d748beb 100644 --- a/src/nupic/swarming/experimentutils.py +++ b/src/nupic/swarming/experimentutils.py @@ -153,7 +153,7 @@ class InferenceType(Enum("TemporalNextStep", @staticmethod def isTemporal(inferenceType): """ Returns True if the inference type is 'temporal', i.e. requires a - temporal pooler in the network. + temporal memory in the network. """ if InferenceType.__temporalInferenceTypes is None: InferenceType.__temporalInferenceTypes = \ diff --git a/tests/integration/nupic/algorithms/extensive_tm_test_base.py b/tests/integration/nupic/algorithms/extensive_tm_test_base.py index 30d5d136c5..957d76cb70 100755 --- a/tests/integration/nupic/algorithms/extensive_tm_test_base.py +++ b/tests/integration/nupic/algorithms/extensive_tm_test_base.py @@ -63,7 +63,7 @@ class ExtensiveTemporalMemoryTest(AbstractTemporalMemoryTest): Each input pattern can optionally have an amount of spatial noise represented by X, where X is the probability of switching an on bit with a random bit. - Training: The TP is trained with P passes of the M sequences. There + Training: The TM is trained with P passes of the M sequences. There should be a reset between sequences. The total number of iterations during training is P*N*M. @@ -96,7 +96,7 @@ class ExtensiveTemporalMemoryTest(AbstractTemporalMemoryTest): connectedPermanence = 0.7 permanenceIncrement = 0.2 - Now we train the TP with the B1 sequence 4 times (P=4). This will increment + Now we train the TM with the B1 sequence 4 times (P=4). This will increment the permanences to be above 0.8 and at that point the inference will be correct. This test will ensure the basic match function and segment activation rules are working correctly. @@ -289,7 +289,7 @@ def testB7(self): connectedPermanence = 0.7 permanenceIncrement = 0.2 - Now we train the TP with the B1 sequence 4 times (P=4). This will increment + Now we train the TM with the B1 sequence 4 times (P=4). This will increment the permanences to be above 0.8 and at that point the inference will be correct. This test will ensure the basic match function and segment activation rules are working correctly. 
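A minimal sketch of the training regime these tests describe: P passes over M sequences of N patterns each, with a reset between sequences, for P*N*M `learn()` calls in total. The helper name is hypothetical; `tm` stands for any BacktrackingTM-compatible instance. Note the arithmetic in the docstring above: with permanenceIncrement = 0.2, four passes (P=4) add 0.8 to the matching permanences, carrying them past the 0.7 connected threshold.

```python
def trainTM(tm, sequences, nPasses):
  # P passes over M sequences of N patterns each: P*N*M learn() calls total.
  for _ in xrange(nPasses):       # Python 2, matching this codebase
    for seq in sequences:
      tm.reset()                  # reset between sequences, as the tests require
      for pattern in seq:
        tm.learn(pattern)         # pattern: binary numpy vector
```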
diff --git a/tests/integration/nupic/algorithms/tp_likelihood_test.py b/tests/integration/nupic/algorithms/tm_likelihood_test.py similarity index 76% rename from tests/integration/nupic/algorithms/tp_likelihood_test.py rename to tests/integration/nupic/algorithms/tm_likelihood_test.py index 83fa82d811..2a1ef91bd1 100755 --- a/tests/integration/nupic/algorithms/tp_likelihood_test.py +++ b/tests/integration/nupic/algorithms/tm_likelihood_test.py @@ -35,10 +35,10 @@ probabilities for E, F and G, i.e. Run the test for several different probability combinations. -LI2) Given a TP trained with LI1, compute the prediction score across a +LI2) Given a TM trained with LI1, compute the prediction score across a list of sequences. -LI3) Given the following sequence and a one cell per column TP: +LI3) Given the following sequence and a one cell per column TM: Seq1: a-b-b-c-d @@ -48,8 +48,8 @@ import numpy import unittest2 as unittest -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.support.unittesthelpers import testcasebase SEED = 42 @@ -88,10 +88,10 @@ def _buildLikelihoodTrainingSet(numOnes=5, relativeFrequencies=None): return (trainingSequences, relativeFrequencies, allPatterns) -def _createTPs(numCols, cellsPerColumn=4, checkSynapseConsistency=True): - """Create TP and TP10X instances with identical parameters. """ +def _createTMs(numCols, cellsPerColumn=4, checkSynapseConsistency=True): + """Create TM and BacktrackingTMCPP instances with identical parameters. """ - # Keep these fixed for both TP's: + # Keep these fixed for both TM's: minThreshold = 4 activationThreshold = 4 newSynapseCount = 5 @@ -102,43 +102,43 @@ def _createTPs(numCols, cellsPerColumn=4, checkSynapseConsistency=True): globalDecay = 0.0 if VERBOSITY > 1: - print "Creating TP10X instance" - - cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, - initialPerm=initialPerm, connectedPerm=connectedPerm, - minThreshold=minThreshold, newSynapseCount=newSynapseCount, - permanenceInc=permanenceInc, permanenceDec=permanenceDec, - activationThreshold=activationThreshold, - globalDecay=globalDecay, burnIn=1, - seed=SEED, verbosity=VERBOSITY, - checkSynapseConsistency=checkSynapseConsistency, - pamLength=1000) + print "Creating BacktrackingTMCPP instance" + + cppTm = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, + initialPerm=initialPerm, connectedPerm=connectedPerm, + minThreshold=minThreshold, newSynapseCount=newSynapseCount, + permanenceInc=permanenceInc, permanenceDec=permanenceDec, + activationThreshold=activationThreshold, + globalDecay=globalDecay, burnIn=1, + seed=SEED, verbosity=VERBOSITY, + checkSynapseConsistency=checkSynapseConsistency, + pamLength=1000) if VERBOSITY > 1: - print "Creating PY TP instance" + print "Creating PY TM instance" - pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, - initialPerm=initialPerm, connectedPerm=connectedPerm, - minThreshold=minThreshold, newSynapseCount=newSynapseCount, - permanenceInc=permanenceInc, permanenceDec=permanenceDec, - activationThreshold=activationThreshold, - globalDecay=globalDecay, burnIn=1, - seed=SEED, verbosity=VERBOSITY, - pamLength=1000) + pyTm = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=cellsPerColumn, + initialPerm=initialPerm, connectedPerm=connectedPerm, + minThreshold=minThreshold, newSynapseCount=newSynapseCount, + permanenceInc=permanenceInc, 
permanenceDec=permanenceDec, + activationThreshold=activationThreshold, + globalDecay=globalDecay, burnIn=1, + seed=SEED, verbosity=VERBOSITY, + pamLength=1000) - return cppTp, pyTp + return cppTm, pyTm -def _computeTPMetric(tp=None, sequences=None, useResets=True, verbosity=1): - """Given a trained TP and a list of sequences, compute the temporal pooler +def _computeTMMetric(tm=None, sequences=None, useResets=True, verbosity=1): + """Given a trained TM and a list of sequences, compute the temporal memory performance metric on those sequences. Parameters: =========== - tp: A trained temporal pooler. + tm: A trained temporal memory. sequences: A list of sequences. Each sequence is a list of numpy vectors. - useResets: If True, the TP's reset method will be called before the + useResets: If True, the TM's reset method will be called before the the start of each new sequence. verbosity: An integer controlling the level of printouts. The higher the number the more debug printouts. @@ -154,12 +154,12 @@ def _computeTPMetric(tp=None, sequences=None, useResets=True, verbosity=1): datasetScore = 0 numPredictions = 0 - tp.resetStats() + tm.resetStats() for seqIdx, seq in enumerate(sequences): # Feed in a reset if useResets: - tp.reset() + tm.reset() seq = numpy.array(seq, dtype='uint32') if verbosity > 2: @@ -170,11 +170,11 @@ def _computeTPMetric(tp=None, sequences=None, useResets=True, verbosity=1): print "pattern", inputPattern - # Feed this input to the TP and get the stats - y = tp.infer(inputPattern) + # Feed this input to the TM and get the stats + y = tm.infer(inputPattern) if verbosity > 2: - stats = tp.getStats() + stats = tm.getStats() if stats['curPredictionScore'] > 0: print " patternConfidence=", stats['curPredictionScore2'] @@ -182,16 +182,16 @@ def _computeTPMetric(tp=None, sequences=None, useResets=True, verbosity=1): # Print some diagnostics for debugging if verbosity > 3: print "\n\n" - predOut = numpy.sum(tp.predictedState['t'], axis=1) - actOut = numpy.sum(tp.activeState['t'], axis=1) - outout = numpy.sum(y.reshape(tp.activeState['t'].shape), axis=1) + predOut = numpy.sum(tm.predictedState['t'], axis=1) + actOut = numpy.sum(tm.activeState['t'], axis=1) + outout = numpy.sum(y.reshape(tm.activeState['t'].shape), axis=1) print "Prediction non-zeros: ", predOut.nonzero() print "Activestate non-zero: ", actOut.nonzero() print "input non-zeros: ", inputPattern.nonzero() print "Output non-zeros: ", outout.nonzero() # Print and return final stats - stats = tp.getStats() + stats = tm.getStats() datasetScore = stats['predictionScoreAvg2'] numPredictions = stats['nPredictions'] print "Final results: datasetScore=", datasetScore, @@ -217,18 +217,18 @@ def _createDataset(numSequences, originalSequences, relativeFrequencies): return dataSet -class TPLikelihoodTest(testcasebase.TestCaseBase): +class TMLikelihoodTest(testcasebase.TestCaseBase): def _testSequence(self, trainingSet, nSequencePresentations=1, - tp=None, + tm=None, testSequences=None, doResets=True, relativeFrequencies=None): """Test a single set of sequences once and check that individual predictions reflect the true relative frequencies. Return a success code - as well as the trained TP. Success code is 1 for pass, 0 for fail. + as well as the trained TM. Success code is 1 for pass, 0 for fail. 
The trainingSet is a set of 3 sequences that share the same first 4 @@ -293,27 +293,27 @@ def _testSequence(self, print "=========Presentation #%d Sequence #%d==============" % \ (r, whichSequence) if doResets: - tp.reset() + tm.reset() for t, x in enumerate(trainingSequence): if VERBOSITY > 3: print "Time step", t - print "Input: ", tp.printInput(x) - tp.learn(x) + print "Input: ", tm.printInput(x) + tm.learn(x) if VERBOSITY > 4: - tp.printStates(printPrevious=(VERBOSITY > 4)) + tm.printStates(printPrevious=(VERBOSITY > 4)) print if VERBOSITY > 4: print "Sequence finished. Complete state after sequence" - tp.printCells() + tm.printCells() print - tp.finishLearning() + tm.finishLearning() if VERBOSITY > 2: print "Training completed. Complete state:" - tp.printCells() + tm.printCells() print - print "TP parameters:" - print tp.printParameters() + print "TM parameters:" + print tm.printParameters() # Infer if VERBOSITY > 1: @@ -321,22 +321,22 @@ def _testSequence(self, testSequence = testSequences[0] slen = len(testSequence) - tp.collectStats = True - tp.resetStats() + tm.collectStats = True + tm.resetStats() if doResets: - tp.reset() + tm.reset() for t, x in enumerate(testSequence): if VERBOSITY > 2: - print "Time step", t, '\nInput:', tp.printInput(x) - tp.infer(x) + print "Time step", t, '\nInput:', tm.printInput(x) + tm.infer(x) if VERBOSITY > 3: - tp.printStates(printPrevious=(VERBOSITY > 4), printLearnState=False) + tm.printStates(printPrevious=(VERBOSITY > 4), printLearnState=False) print # We will exit with the confidence score for the last element if t == slen-2: - tpNonZeros = [pattern.nonzero()[0] for pattern in allTrainingPatterns] - predictionScore2 = tp.checkPrediction2(tpNonZeros)[2] + tmNonZeros = [pattern.nonzero()[0] for pattern in allTrainingPatterns] + predictionScore2 = tm.checkPrediction2(tmNonZeros)[2] if VERBOSITY > 0: print "predictionScore:", predictionScore2 @@ -347,7 +347,7 @@ def _testSequence(self, patternConfidenceScores = numpy.array([x[1] for x in predictionScore2]) # Normalize so that the sum is 1.0. This makes us independent of any # potential scaling differences in the column confidence calculations of - # various TP implementations. + # various TM implementations. patternConfidenceScores /= patternConfidenceScores.sum() msg = ('Prediction failed with predictionScore: %s. Expected %s but got %s.' @@ -367,14 +367,14 @@ def _likelihoodTest1(self, numOnes=5, relativeFrequencies=None, print relativeFrequencies trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies) - cppTp, pyTp = _createTPs(numCols=trainingSet[0][0][0].size, - checkSynapseConsistency=checkSynapseConsistency) + cppTm, pyTm = _createTMs(numCols=trainingSet[0][0][0].size, + checkSynapseConsistency=checkSynapseConsistency) - # Test both TP's. Currently the CPP TP has faster confidence estimation - self._testSequence(trainingSet, nSequencePresentations=200, tp=cppTp, + # Test both TM's. 
Currently the CPP TM has faster confidence estimation + self._testSequence(trainingSet, nSequencePresentations=200, tm=cppTm, relativeFrequencies=relativeFrequencies) - self._testSequence(trainingSet, nSequencePresentations=500, tp=pyTp, + self._testSequence(trainingSet, nSequencePresentations=500, tm=pyTm, relativeFrequencies=relativeFrequencies) def _likelihoodTest2(self, numOnes=5, relativeFrequencies=None, @@ -384,25 +384,25 @@ def _likelihoodTest2(self, numOnes=5, relativeFrequencies=None, trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies) - cppTp, pyTp = _createTPs(numCols=trainingSet[0][0][0].size, - checkSynapseConsistency=checkSynapseConsistency) + cppTm, pyTm = _createTMs(numCols=trainingSet[0][0][0].size, + checkSynapseConsistency=checkSynapseConsistency) - # Test both TP's - for tp in [cppTp, pyTp]: - self._testSequence(trainingSet, nSequencePresentations=500, tp=tp, + # Test both TM's + for tm in [cppTm, pyTm]: + self._testSequence(trainingSet, nSequencePresentations=500, tm=tm, relativeFrequencies=relativeFrequencies) # Create a dataset with the same relative frequencies for testing the # metric. testDataSet = _createDataset(500, trainingSet[0], relativeFrequencies) - tp.collectStats = True - score, _ = _computeTPMetric(tp, testDataSet, verbosity=2) + tm.collectStats = True + score, _ = _computeTMMetric(tm, testDataSet, verbosity=2) # Create a dataset with very different relative frequencies # This score should be lower than the one above. testDataSet = _createDataset(500, trainingSet[0], relativeFrequencies = [0.1, 0.1, 0.9]) - score2, _ = _computeTPMetric(tp, testDataSet, verbosity=2) + score2, _ = _computeTMMetric(tm, testDataSet, verbosity=2) self.assertLessEqual(score2, score) diff --git a/tests/integration/nupic/algorithms/tp_overlapping_sequences_test.py b/tests/integration/nupic/algorithms/tm_overlapping_sequences_test.py similarity index 81% rename from tests/integration/nupic/algorithms/tp_overlapping_sequences_test.py rename to tests/integration/nupic/algorithms/tm_overlapping_sequences_test.py index a2545a6413..1e7680fea1 100755 --- a/tests/integration/nupic/algorithms/tp_overlapping_sequences_test.py +++ b/tests/integration/nupic/algorithms/tm_overlapping_sequences_test.py @@ -31,7 +31,7 @@ Test 2 - Test with slow learning, make sure PAM allows us to train with fewer repeats of the training data. -Test 3 - Test with slow learning, some overlap in the patterns, and TP +Test 3 - Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Test 4 - Test with "Forbes-like" data. A bunch of sequences of lengths between 2 @@ -46,8 +46,8 @@ import random import unittest2 as unittest -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils from nupic.support.unittesthelpers import testcasebase @@ -56,8 +56,8 @@ # Whether to only run the short tests. SHORT = True -# If set to 0 the CPP TP will not be tested -INCLUDE_CPP_TP = 1 # Also test with CPP TP +# If set to 0 the CPP TM will not be tested +INCLUDE_CPP_TM = 1 # Also test with CPP TM @@ -124,7 +124,7 @@ def buildOverlappedSequences( numSequences = 2, seqLen: Overall length of each sequence sharedElements: Which element indices of each sequence are shared. 
These will be in the range between 0 and seqLen-1 - numOnBitsPerPattern: Number of ON bits in each TP input pattern + numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns @@ -191,7 +191,7 @@ def buildSequencePool(numSequences = 10, seqLen: List of possible sequence lengths numPatterns: How many possible patterns there are to use within sequences - numOnBitsPerPattern: Number of ON bits in each TP input pattern + numOnBitsPerPattern: Number of ON bits in each TM input pattern patternOverlap: Max number of bits of overlap between any 2 patterns retval: (numCols, trainingSequences) numCols - width of the patterns @@ -231,7 +231,7 @@ def buildSequencePool(numSequences = 10, -def createTPs(includeCPP = True, +def createTMs(includeCPP = True, includePy = True, numCols = 100, cellsPerCol = 4, @@ -249,118 +249,118 @@ def createTPs(includeCPP = True, **kwargs ): - """Create one or more TP instances, placing each into a dict keyed by + """Create one or more TM instances, placing each into a dict keyed by name. Parameters: ------------------------------------------------------------------ - retval: tps - dict of TP instances + retval: tms - dict of TM instances """ # Keep these fixed: connectedPerm = 0.5 - tps = dict() + tms = dict() if includeCPP: if VERBOSITY >= 2: - print "Creating TP10X2 instance" + print "Creating BacktrackingTMCPP instance" - cpp_tp = TP10X2(numberOfCols = numCols, cellsPerColumn = cellsPerCol, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, burnIn = 1, - seed=SEED, verbosity=VERBOSITY, - checkSynapseConsistency = checkSynapseConsistency, - collectStats = True, - pamLength = pamLength, - maxInfBacktrack = maxInfBacktrack, - maxLrnBacktrack = maxLrnBacktrack, - ) + cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol, + initialPerm = initialPerm, connectedPerm = connectedPerm, + minThreshold = minThreshold, newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, burnIn = 1, + seed=SEED, verbosity=VERBOSITY, + checkSynapseConsistency = checkSynapseConsistency, + collectStats = True, + pamLength = pamLength, + maxInfBacktrack = maxInfBacktrack, + maxLrnBacktrack = maxLrnBacktrack, + ) - # Ensure we are copying over learning states for TPDiff - cpp_tp.retrieveLearningStates = True + # Ensure we are copying over learning states for TMDiff + cpp_tm.retrieveLearningStates = True - tps['CPP'] = cpp_tp + tms['CPP'] = cpp_tm if includePy: if VERBOSITY >= 2: - print "Creating PY TP instance" + print "Creating PY TM instance" - py_tp = TP(numberOfCols = numCols, cellsPerColumn = cellsPerCol, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, burnIn = 1, - seed=SEED, verbosity=VERBOSITY, - collectStats = True, - pamLength = pamLength, - maxInfBacktrack = maxInfBacktrack, - maxLrnBacktrack = maxLrnBacktrack, - ) + py_tm = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = 
cellsPerCol, + initialPerm = initialPerm, connectedPerm = connectedPerm, + minThreshold = minThreshold, newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, burnIn = 1, + seed=SEED, verbosity=VERBOSITY, + collectStats = True, + pamLength = pamLength, + maxInfBacktrack = maxInfBacktrack, + maxLrnBacktrack = maxLrnBacktrack, + ) - tps['PY '] = py_tp + tms['PY '] = py_tm - return tps + return tms -def assertNoTPDiffs(tps): +def assertNoTMDiffs(tms): """ - Check for diffs among the TP instances in the passed in tps dict and + Check for diffs among the TM instances in the passed in tms dict and raise an assert if any are detected Parameters: --------------------------------------------------------------------- - tps: dict of TP instances + tms: dict of TM instances """ - if len(tps) == 1: + if len(tms) == 1: return - if len(tps) > 2: - raise "Not implemented for more than 2 TPs" + if len(tms) > 2: + raise "Not implemented for more than 2 TMs" - same = fdrutils.tpDiff2(*tps.values(), verbosity=VERBOSITY) + same = fdrutils.tmDiff2(*tms.values(), verbosity=VERBOSITY) assert(same) return -def evalSequences(tps, +def evalSequences(tms, trainingSequences, testSequences = None, nTrainRepetitions = 1, doResets = True, **kwargs): - """Train the TPs on the entire training set for nTrainRepetitions in a row. + """Train the TMs on the entire training set for nTrainRepetitions in a row. Then run the test set through inference once and return the inference stats. Parameters: --------------------------------------------------------------------- - tps: dict of TP instances + tms: dict of TM instances trainingSequences: list of training sequences. Each sequence is a list - of TP input patterns + of TM input patterns testSequences: list of test sequences. If None, we will test against the trainingSequences - nTrainRepetitions: Number of times to run the training set through the TP - doResets: If true, send a reset to the TP between each sequence + nTrainRepetitions: Number of times to run the training set through the TM + doResets: If true, send a reset to the TM between each sequence """ # If no test sequence is specified, use the first training sequence if testSequences == None: testSequences = trainingSequences - # First TP instance is used by default for verbose printing of input values, + # First TM instance is used by default for verbose printing of input values, # etc. 
- firstTP = tps.values()[0] + firstTP = tms.values()[0] - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # ===================================================================== # Loop through the training set nTrainRepetitions times @@ -370,10 +370,10 @@ def evalSequences(tps, print "\n##############################################################" print "################# Training round #%d of %d #################" \ % (trainingNum, nTrainRepetitions) - for (name,tp) in tps.iteritems(): - print "TP parameters for %s: " % (name) + for (name,tm) in tms.iteritems(): + print "TM parameters for %s: " % (name) print "---------------------" - tp.printParameters() + tm.printParameters() print # ====================================================================== @@ -387,8 +387,8 @@ def evalSequences(tps, % (sequenceNum, numSequences) if doResets: - for tp in tps.itervalues(): - tp.reset() + for tm in tms.itervalues(): + tm.reset() # -------------------------------------------------------------------- # Train each element of the sequence @@ -406,49 +406,49 @@ def evalSequences(tps, # Train in this element x = numpy.array(x).astype('float32') - for tp in tps.itervalues(): - tp.learn(x, computeInfOutput=True) + for tm in tms.itervalues(): + tm.learn(x, computeInfOutput=True) # Print the input and output states if VERBOSITY >= 3: - for (name,tp) in tps.iteritems(): - print "I/O states of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "I/O states of %s TM:" % (name) print "-------------------------------------", - tp.printStates(printPrevious = (VERBOSITY >= 5)) + tm.printStates(printPrevious = (VERBOSITY >= 5)) print - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # Print out number of columns that weren't predicted if VERBOSITY >= 2: - for (name,tp) in tps.iteritems(): - stats = tp.getStats() - print "# of unpredicted columns for %s TP: %d of %d" \ + for (name,tm) in tms.iteritems(): + stats = tm.getStats() + print "# of unpredicted columns for %s TM: %d of %d" \ % (name, stats['curMissing'], x.sum()) - numBurstingCols = tp.infActiveState['t'].min(axis=1).sum() - print "# of bursting columns for %s TP: %d of %d" \ + numBurstingCols = tm.infActiveState['t'].min(axis=1).sum() + print "# of bursting columns for %s TM: %d of %d" \ % (name, numBurstingCols, x.sum()) # Print the trained cells if VERBOSITY >= 4: print "Sequence %d finished." 
% (sequenceNum) - for (name,tp) in tps.iteritems(): - print "All cells of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "All cells of %s TM:" % (name) print "-------------------------------------", - tp.printCells() + tm.printCells() print # -------------------------------------------------------------------- # Done training all sequences in this round, print the total number of - # missing, extra columns and make sure it's the same among the TPs + # missing, extra columns and make sure it's the same among the TMs if VERBOSITY >= 2: print prevResult = None - for (name,tp) in tps.iteritems(): - stats = tp.getStats() + for (name,tm) in tms.iteritems(): + stats = tm.getStats() if VERBOSITY >= 1: - print "Stats for %s TP over all sequences for training round #%d of %d:" \ + print "Stats for %s TM over all sequences for training round #%d of %d:" \ % (name, trainingNum, nTrainRepetitions) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] @@ -459,7 +459,7 @@ def evalSequences(tps, assert (stats['totalMissing'] == prevResult[0]) assert (stats['totalExtra'] == prevResult[1]) - tp.resetStats() + tm.resetStats() # ===================================================================== @@ -467,21 +467,21 @@ def evalSequences(tps, if VERBOSITY >= 3: print "Calling trim segments" prevResult = None - for tp in tps.itervalues(): - nSegsRemoved, nSynsRemoved = tp.trimSegments() + for tm in tms.itervalues(): + nSegsRemoved, nSynsRemoved = tm.trimSegments() if prevResult is None: prevResult = (nSegsRemoved, nSynsRemoved) else: assert (nSegsRemoved == prevResult[0]) assert (nSynsRemoved == prevResult[1]) - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) if VERBOSITY >= 4: print "Training completed. Complete state:" - for (name,tp) in tps.iteritems(): + for (name,tm) in tms.iteritems(): print "%s:" % (name) - tp.printCells() + tm.printCells() print @@ -492,9 +492,9 @@ def evalSequences(tps, print "\n##############################################################" print "########################## Inference #########################" - # Reset stats in all TPs - for tp in tps.itervalues(): - tp.resetStats() + # Reset stats in all TMs + for tm in tms.itervalues(): + tm.resetStats() # ------------------------------------------------------------------- # Loop through the test sequences @@ -509,8 +509,8 @@ def evalSequences(tps, # Send in the rest if doResets: - for tp in tps.itervalues(): - tp.reset() + for tm in tms.itervalues(): + tm.reset() # ------------------------------------------------------------------- # Loop through the elements of this sequence @@ -527,35 +527,35 @@ def evalSequences(tps, print "input nzs:", x.nonzero() # Infer on this element - for tp in tps.itervalues(): - tp.infer(x) + for tm in tms.itervalues(): + tm.infer(x) - assertNoTPDiffs(tps) + assertNoTMDiffs(tms) # Print out number of columns that weren't predicted if VERBOSITY >= 2: - for (name,tp) in tps.iteritems(): - stats = tp.getStats() - print "# of unpredicted columns for %s TP: %d of %d" \ + for (name,tm) in tms.iteritems(): + stats = tm.getStats() + print "# of unpredicted columns for %s TM: %d of %d" \ % (name, stats['curMissing'], x.sum()) # Debug print of internal state if VERBOSITY >= 3: - for (name,tp) in tps.iteritems(): - print "I/O states of %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "I/O states of %s TM:" % (name) print "-------------------------------------", - tp.printStates(printPrevious = (VERBOSITY >= 5), + tm.printStates(printPrevious = (VERBOSITY >= 
5), printLearnState = False) print # Done with this sequence - # Debug print of all stats of the TPs + # Debug print of all stats of the TMs if VERBOSITY >= 4: print - for (name,tp) in tps.iteritems(): - print "Interim internal stats for %s TP:" % (name) + for (name,tm) in tms.iteritems(): + print "Interim internal stats for %s TM:" % (name) print "---------------------------------" - pprint.pprint(tp.getStats()) + pprint.pprint(tm.getStats()) print @@ -563,18 +563,18 @@ def evalSequences(tps, print "\n##############################################################" print "####################### Inference Done #######################" - # Get the overall stats for each TP and return them + # Get the overall stats for each TM and return them tpStats = dict() - for (name,tp) in tps.iteritems(): - tpStats[name] = stats = tp.getStats() + for (name,tm) in tms.iteritems(): + tpStats[name] = stats = tm.getStats() if VERBOSITY >= 2: - print "Stats for %s TP over all sequences:" % (name) + print "Stats for %s TM over all sequences:" % (name) print " total missing:", stats['totalMissing'] print " total extra:", stats['totalExtra'] - for (name,tp) in tps.iteritems(): + for (name,tm) in tms.iteritems(): if VERBOSITY >= 3: - print "\nAll internal stats for %s TP:" % (name) + print "\nAll internal stats for %s TM:" % (name) print "-------------------------------------", pprint.pprint(tpStats[name]) print @@ -585,14 +585,14 @@ def evalSequences(tps, def _testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): """ - Build up a set of sequences, create the TP(s), train them, test them, + Build up a set of sequences, create the TM(s), train them, test them, and check that we got the expected number of missing predictions during inference. Parameters: ----------------------------------------------------------------------- baseParams: dict of all of the parameters for building sequences, - creating the TPs, and training and testing them. This + creating the TMs, and training and testing them. This gets updated from 'mods' before we use it. expMissingMin: Minimum number of expected missing predictions during testing. 
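A hedged sketch of the acceptance check `_testConfig` describes (the helper name is hypothetical; `tmStats` is the per-implementation stats dict that evalSequences() returns):

```python
def checkExpectedMissing(tmStats, expMissingMin, expMissingMax):
  # Every TM implementation must land its total missing-prediction count
  # inside the expected [min, max] band.
  for name, stats in tmStats.iteritems():   # Python 2 dict iteration
    missing = stats['totalMissing']
    assert expMissingMin <= missing <= expMissingMax, \
        "%s TM: %d missing predictions, expected %d..%d" % (
            name, missing, expMissingMin, expMissingMax)
```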
@@ -612,14 +612,14 @@ def _testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): (numCols, trainingSequences) = func(**params) # -------------------------------------------------------------------- - # Create the TPs + # Create the TMs if params['numCols'] is None: params['numCols'] = numCols - tps = createTPs(**params) + tps = createTMs(**params) # -------------------------------------------------------------------- # Train and get test results - tpStats = evalSequences(tps = tps, + tpStats = evalSequences(tms= tps, trainingSequences=trainingSequences, testSequences=None, **params) @@ -642,7 +642,7 @@ def _testConfig(baseParams, expMissingMin=0, expMissingMax=0, **mods): return True -class TPOverlappingSeqsTest(testcasebase.TestCaseBase): +class TMOverlappingSeqsTest(testcasebase.TestCaseBase): def testFastLearning(self): """ @@ -662,8 +662,8 @@ def testFastLearning(self): sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, @@ -712,8 +712,8 @@ def testSlowLearning(self): sharedElements = [2,3], numOnBitsPerPattern = numOnBitsPerPattern, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = numOnBitsPerPattern, minThreshold = numOnBitsPerPattern, @@ -747,7 +747,7 @@ def testSlowLearning(self): def testSlowLearningWithOverlap(self): """ - Test with slow learning, some overlap in the patterns, and TP thresholds + Test with slow learning, some overlap in the patterns, and TM thresholds of 80% of newSynapseCount Make sure PAM allows us to train with fewer repeats of the training data. @@ -769,8 +769,8 @@ def testSlowLearningWithOverlap(self): numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 2, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), @@ -827,8 +827,8 @@ def testForbesLikeData(self): numOnBitsPerPattern = numOnBitsPerPattern, patternOverlap = 1, - # TP construction - includeCPP = INCLUDE_CPP_TP, + # TM construction + includeCPP = INCLUDE_CPP_TM, numCols = None, # filled in based on generated sequences activationThreshold = int(0.8 * numOnBitsPerPattern), minThreshold = int(0.8 * numOnBitsPerPattern), @@ -900,9 +900,9 @@ def testForbesLikeData(self): rgen = numpy.random.RandomState(SEED) random.seed(SEED) - if not INCLUDE_CPP_TP: + if not INCLUDE_CPP_TM: print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - print "!! WARNING: C++ TP testing is DISABLED until it can be updated." + print "!! WARNING: C++ TM testing is DISABLED until it can be updated." print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # Form the command line for the unit test framework. 
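Pulling the renamed pieces of this module together, a sketch of the end-to-end flow. Argument values mirror testFastLearning above; the signatures are as shown earlier in this diff, so treat anything beyond them as an assumption:

```python
numOnBitsPerPattern = 3
numCols, trainingSequences = buildOverlappedSequences(
    numSequences=2, seqLen=5, sharedElements=[2, 3],
    numOnBitsPerPattern=numOnBitsPerPattern)

tms = createTMs(includeCPP=INCLUDE_CPP_TM, includePy=True,
                numCols=numCols,
                activationThreshold=numOnBitsPerPattern,
                minThreshold=numOnBitsPerPattern)

# Train for one repetition, then run inference and collect per-TM stats.
tmStats = evalSequences(tms, trainingSequences=trainingSequences,
                        testSequences=None, nTrainRepetitions=1)
```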
diff --git a/tests/integration/nupic/algorithms/tp_segment_learning.py b/tests/integration/nupic/algorithms/tm_segment_learning.py similarity index 72% rename from tests/integration/nupic/algorithms/tp_segment_learning.py rename to tests/integration/nupic/algorithms/tm_segment_learning.py index a69a04bcd9..8bacdc1ce9 100755 --- a/tests/integration/nupic/algorithms/tp_segment_learning.py +++ b/tests/integration/nupic/algorithms/tm_segment_learning.py @@ -25,7 +25,7 @@ Multi-attribute sequence tests. -SL1) Train the TP repeatedly using a single sequence plus noise. The sequence +SL1) Train the TM repeatedly using a single sequence plus noise. The sequence can be relatively short, say 5 patterns. Add random noise each time a pattern is presented. The noise should be different for each presentation and can be equal to the number of on bits in the pattern. @@ -35,7 +35,7 @@ will be in the left half of the input vector. The noise bits will be in the right half of the input vector. -After several iterations of each sequence, the TP should should achieve perfect +After several iterations of each sequence, the TM should achieve perfect inference on the true sequence. There should be resets between each presentation of the sequence. Check predictions in the sequence part only (it's ok to predict random bits in the right half of the column space), and test with clean @@ -56,13 +56,13 @@ import numpy import unittest2 as unittest -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils from nupic.support.unittesthelpers import testcasebase -g_testCPPTP = True +g_testCPPTM = True @@ -92,16 +92,16 @@ def _printAllTrainingSequences(self, trainingSequences): self._printOneTrainingVector(pattern) - def _setVerbosity(self, verbosity, tp, tpPy): - """Set verbosity level on the TP""" - tp.cells4.setVerbosity(verbosity) - tp.verbosity = verbosity - tpPy.verbosity = verbosity + def _setVerbosity(self, verbosity, tm, tmPy): + """Set verbosity level on the TM""" + tm.cells4.setVerbosity(verbosity) + tm.verbosity = verbosity + tmPy.verbosity = verbosity - def _createTPs(self, numCols, fixedResources=False, - checkSynapseConsistency = True): - """Create an instance of the appropriate temporal pooler. We isolate + def _createTMs(self, numCols, fixedResources=False, + checkSynapseConsistency = True): + """Create an instance of the appropriate temporal memory.
We isolate all parameters as constants specified here.""" # Keep these fixed: @@ -127,45 +127,48 @@ def _createTPs(self, numCols, fixedResources=False, maxAge = 1 - if g_testCPPTP: + if g_testCPPTM: if g_options.verbosity > 1: - print "Creating TP10X2 instance" - - cppTP = TP10X2(numberOfCols = numCols, cellsPerColumn = 4, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, - newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, - permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, - seed=g_options.seed, verbosity=g_options.verbosity, - checkSynapseConsistency = checkSynapseConsistency, - pamLength = 1000, - maxSegmentsPerCell = maxSegmentsPerCell, - maxSynapsesPerSegment = maxSynapsesPerSegment, - ) - # Ensure we are copying over learning states for TPDiff - cppTP.retrieveLearningStates = True + print "Creating BacktrackingTMCPP instance" + + cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = 4, + initialPerm = initialPerm, connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, + seed=g_options.seed, verbosity=g_options.verbosity, + checkSynapseConsistency = checkSynapseConsistency, + pamLength = 1000, + maxSegmentsPerCell = maxSegmentsPerCell, + maxSynapsesPerSegment = maxSynapsesPerSegment, + ) + # Ensure we are copying over learning states for TMDiff + cppTM.retrieveLearningStates = True else: - cppTP = None + cppTM = None if g_options.verbosity > 1: - print "Creating PY TP instance" - pyTP = TP(numberOfCols = numCols, cellsPerColumn = 4, - initialPerm = initialPerm, connectedPerm = connectedPerm, - minThreshold = minThreshold, newSynapseCount = newSynapseCount, - permanenceInc = permanenceInc, permanenceDec = permanenceDec, - activationThreshold = activationThreshold, - globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, - seed=g_options.seed, verbosity=g_options.verbosity, - pamLength = 1000, - maxSegmentsPerCell = maxSegmentsPerCell, - maxSynapsesPerSegment = maxSynapsesPerSegment, - ) - - return cppTP, pyTP + print "Creating PY TM instance" + pyTM = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = 4, + initialPerm = initialPerm, + connectedPerm = connectedPerm, + minThreshold = minThreshold, + newSynapseCount = newSynapseCount, + permanenceInc = permanenceInc, + permanenceDec = permanenceDec, + activationThreshold = activationThreshold, + globalDecay = globalDecay, maxAge=maxAge, burnIn = 1, + seed=g_options.seed, verbosity=g_options.verbosity, + pamLength = 1000, + maxSegmentsPerCell = maxSegmentsPerCell, + maxSynapsesPerSegment = maxSynapsesPerSegment, + ) + + return cppTM, pyTM def _getSimplePatterns(self, numOnes, numPatterns): @@ -303,12 +306,12 @@ def _buildSL2TrainingSet(self, numOnes=10, numRepetitions= 10): return (trainingSequences, testSequences) - def _testSegmentLearningSequence(self, tps, - trainingSequences, - testSequences, - doResets = True): + def _testSegmentLearningSequence(self, tms, - trainingSequences, - testSequences, - doResets = True): wait
Return a success code. Success code is 1 for pass, 0 for fail.""" @@ -317,21 +320,21 @@ def _testSegmentLearningSequence(self, tps, if testSequences == None: testSequences = trainingSequences - cppTP, pyTP = tps[0], tps[1] + cppTM, pyTM = tms[0], tms[1] - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True #-------------------------------------------------------------------------- # Learn if g_options.verbosity > 0: print "============= Training =================" - print "TP parameters:" + print "TM parameters:" print "CPP" - if cppTP is not None: - print cppTP.printParameters() + if cppTM is not None: + print cppTM.printParameters() print "\nPY" - print pyTP.printParameters() + print pyTM.printParameters() for sequenceNum, trainingSequence in enumerate(trainingSequences): @@ -339,76 +342,76 @@ def _testSegmentLearningSequence(self, tps, print "============= New sequence =================" if doResets: - if cppTP is not None: - cppTP.reset() - pyTP.reset() + if cppTM is not None: + cppTM.reset() + pyTM.reset() for t, x in enumerate(trainingSequence): if g_options.verbosity > 1: print "Time step", t, "sequence number", sequenceNum - print "Input: ", pyTP.printInput(x) + print "Input: ", pyTM.printInput(x) print "NNZ:", x.nonzero() x = numpy.array(x).astype('float32') - if cppTP is not None: - cppTP.learn(x) - pyTP.learn(x) + if cppTM is not None: + cppTM.learn(x) + pyTM.learn(x) - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity, + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity, relaxSegmentTests = False) == True if g_options.verbosity > 2: - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printStates(printPrevious = (g_options.verbosity > 4)) + cppTM.printStates(printPrevious = (g_options.verbosity > 4)) print "\nPY" - pyTP.printStates(printPrevious = (g_options.verbosity > 4)) + pyTM.printStates(printPrevious = (g_options.verbosity > 4)) print if g_options.verbosity > 4: print "Sequence finished. Complete state after sequence" - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printCells() + cppTM.printCells() print "\nPY" - pyTP.printCells() + pyTM.printCells() print if g_options.verbosity > 2: print "Calling trim segments" - if cppTP is not None: - nSegsRemovedCPP, nSynsRemovedCPP = cppTP.trimSegments() - nSegsRemoved, nSynsRemoved = pyTP.trimSegments() - if cppTP is not None: + if cppTM is not None: + nSegsRemovedCPP, nSynsRemovedCPP = cppTM.trimSegments() + nSegsRemoved, nSynsRemoved = pyTM.trimSegments() + if cppTM is not None: assert nSegsRemovedCPP == nSegsRemoved assert nSynsRemovedCPP == nSynsRemoved - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True print "Training completed. 
Stats:" - info = pyTP.getSegmentInfo() + info = pyTM.getSegmentInfo() print " nSegments:", info[0] print " nSynapses:", info[1] if g_options.verbosity > 3: print "Complete state:" - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printCells() + cppTM.printCells() print "\nPY" - pyTP.printCells() + pyTM.printCells() #--------------------------------------------------------------------------- # Infer if g_options.verbosity > 1: print "============= Inference =================" - if cppTP is not None: - cppTP.collectStats = True - pyTP.collectStats = True + if cppTM is not None: + cppTM.collectStats = True + pyTM.collectStats = True nPredictions = 0 cppNumCorrect, pyNumCorrect = 0, 0 @@ -421,58 +424,58 @@ def _testSegmentLearningSequence(self, tps, slen = len(testSequence) if doResets: - if cppTP is not None: - cppTP.reset() - pyTP.reset() + if cppTM is not None: + cppTM.reset() + pyTM.reset() for t, x in enumerate(testSequence): if g_options.verbosity >= 2: print "Time step", t, '\nInput:' - pyTP.printInput(x) + pyTM.printInput(x) - if cppTP is not None: - cppTP.infer(x) - pyTP.infer(x) + if cppTM is not None: + cppTM.infer(x) + pyTM.infer(x) - if cppTP is not None: - assert fdrutils.tpDiff2(cppTP, pyTP, g_options.verbosity) == True + if cppTM is not None: + assert fdrutils.tmDiff2(cppTM, pyTM, g_options.verbosity) == True if g_options.verbosity > 2: - if cppTP is not None: + if cppTM is not None: print "CPP" - cppTP.printStates(printPrevious = (g_options.verbosity > 4), + cppTM.printStates(printPrevious = (g_options.verbosity > 4), printLearnState = False) print "\nPY" - pyTP.printStates(printPrevious = (g_options.verbosity > 4), + pyTM.printStates(printPrevious = (g_options.verbosity > 4), printLearnState = False) - if cppTP is not None: - cppScores = cppTP.getStats() - pyScores = pyTP.getStats() + if cppTM is not None: + cppScores = cppTM.getStats() + pyScores = pyTM.getStats() if g_options.verbosity >= 2: - if cppTP is not None: + if cppTM is not None: print "CPP" print cppScores print "\nPY" print pyScores - if t < slen-1 and t > pyTP.burnIn: + if t < slen-1 and t > pyTM.burnIn: nPredictions += 1 - if cppTP is not None: + if cppTM is not None: if cppScores['curPredictionScore2'] > 0.3: cppNumCorrect += 1 if pyScores['curPredictionScore2'] > 0.3: pyNumCorrect += 1 # Check that every inference was correct, excluding the very last inference - if cppTP is not None: - cppScores = cppTP.getStats() - pyScores = pyTP.getStats() + if cppTM is not None: + cppScores = cppTM.getStats() + pyScores = pyTM.getStats() passTest = False - if cppTP is not None: + if cppTM is not None: if cppNumCorrect == nPredictions and pyNumCorrect == nPredictions: passTest = True else: @@ -502,10 +505,10 @@ def _testSL1(self, numOnes = 10, numRepetitions = 6, fixedResources = False, numRepetitions) numCols = len(trainingSet[0][0]) - tps = self._createTPs(numCols = numCols, fixedResources=fixedResources, - checkSynapseConsistency = checkSynapseConsistency) + tms = self._createTMs(numCols = numCols, fixedResources=fixedResources, + checkSynapseConsistency = checkSynapseConsistency) - testResult = self._testSegmentLearningSequence(tps, trainingSet, testSet) + testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet) if testResult: print "%s PASS" % testName @@ -529,10 +532,10 @@ def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False, trainingSet, testSet = self._buildSL2TrainingSet(numOnes, numRepetitions) numCols = len(trainingSet[0][0]) - tps = 
self._createTPs(numCols = numCols, fixedResources=fixedResources, - checkSynapseConsistency = checkSynapseConsistency) + tms = self._createTMs(numCols = numCols, fixedResources=fixedResources, + checkSynapseConsistency = checkSynapseConsistency) - testResult = self._testSegmentLearningSequence(tps, trainingSet, testSet) + testResult = self._testSegmentLearningSequence(tms, trainingSet, testSet) if testResult: print "%s PASS" % testName @@ -543,7 +546,7 @@ def _testSL2(self, numOnes = 10, numRepetitions = 10, fixedResources = False, -class TPSegmentLearningTests(ExperimentTestBaseClass): +class TMSegmentLearningTests(ExperimentTestBaseClass): """Our high level tests""" diff --git a/tests/integration/nupic/algorithms/tp_test.py b/tests/integration/nupic/algorithms/tm_test.py similarity index 92% rename from tests/integration/nupic/algorithms/tp_test.py rename to tests/integration/nupic/algorithms/tm_test.py index cd5dcb624c..235cd17a84 100755 --- a/tests/integration/nupic/algorithms/tp_test.py +++ b/tests/integration/nupic/algorithms/tm_test.py @@ -20,12 +20,12 @@ # ---------------------------------------------------------------------- """ -This file performs a variety of tests on the reference temporal pooler code. +This file performs a variety of tests on the reference temporal memory code. basic_test ========== -Tests creation and serialization of the TP class. Sets parameters and ensures +Tests creation and serialization of the TM class. Sets parameters and ensures they are the same after a serialization and de-serialization step. Runs learning and inference on a small number of random patterns and ensures it doesn't crash. @@ -65,7 +65,7 @@ constructed so that consecutive patterns within a sequence don't share any columns. -Training: The TP is trained with P passes of the M sequences. There +Training: The TM is trained with P passes of the M sequences. There should be a reset between sequences. The total number of iterations during training is P*N*M. @@ -102,7 +102,7 @@ connectedPerm = 0.7 permanenceInc = 0.2 -Now we train the TP with the B1 sequence 4 times (P=4). This will increment +Now we train the TM with the B1 sequence 4 times (P=4). This will increment the permanences to be above 0.8 and at that point the inference will be correct. This test will ensure the basic match function and segment activation rules are working correctly. @@ -232,14 +232,14 @@ Note: for pooling tests the density of input patterns should be pretty low since each pooling step increases the output density. At the same time, we need -enough bits on in the input for the temporal pooler to find enough synapses. So, +enough bits on in the input for the temporal memory to find enough synapses. So, for the tests, constraints should be something like: (Input Density) * (Number of pooling steps) < 25 %. AND sum(Input) > newSynapseCount*1.5 -Training: The TP is trained with P passes of the M sequences. There +Training: The TM is trained with P passes of the M sequences. There should be a reset between sequences. The total number of iterations during training is P*N*M. @@ -250,14 +250,14 @@ with no extra columns. We report the number of columns that are incorrect and report a failure if more than 2 columns are incorrectly predicted. -P1) Train the TP two times (P=2) on a single long sequence consisting of random +P1) Train the TM two times (P=2) on a single long sequence consisting of random patterns (N=20, M=1). There should be no overlapping columns between successive -patterns. 
During inference, the TP should be able reliably predict the pattern +patterns. During inference, the TM should be able to reliably predict the pattern two time steps in advance. numCols should be about 350 to meet the above constraints and also to maintain consistency with test P2. -P2) Increase TP rate to 3 time steps in advance (P=3). At each step during -inference, the TP should be able to reliably predict the pattern coming up at +P2) Increase TM rate to 3 time steps in advance (P=3). At each step during +inference, the TM should be able to reliably predict the pattern coming up at t+1, t+2, and t+3. P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost @@ -283,10 +283,10 @@ HiLo Tests ========== -A high order sequence memory like the TP can memorize very long sequences. In +A high order sequence memory like the TM can memorize very long sequences. In many applications, though, you don't want to memorize. You see a long sequence of patterns but there are actually lower order repeating sequences embedded within -it. A simplistic example is words in a sentence. Words such as You'd like the TP to learn those sequences. +it. A simplistic example is words in a sentence. You'd like the TM to learn those sequences. Tests should capture the number of synapses learned and compare against theoretically optimal numbers to pass/fail. @@ -298,7 +298,7 @@ shares any columns with the others. These sequences are easy to visualize and are very useful for debugging. -TP parameters should be the same as B7 except that permanenceDec should be 0.05: +TM parameters should be the same as B7 except that permanenceDec should be 0.05: activationThreshold = newSynapseCount minThreshold = activationThreshold @@ -322,7 +322,7 @@ actually happening in the code, but verified by visual inspection only. HL1) Noise + sequence + noise + sequence repeatedly without resets until it has -learned that sequence. Train the TP repeatedly with N random sequences that all +learned that sequence. Train the TM repeatedly with N random sequences that all share a single subsequence. Each random sequence can be 10 patterns long, sharing a subsequence that is 5 patterns long. There should be no resets between presentations. Inference should then be on that 5 long shared subsequence. @@ -334,7 +334,7 @@ R S T D E F U V W X Y Z 1 D E F 2 3 4 5 -TP parameters should be the same as HL0. +TM parameters should be the same as HL0. HL2) Like HL1, but after A B C has been learned, try to learn D A B C. It should learn that ABC is separate from DABC. @@ -365,7 +365,7 @@ Sequence Likelihood Tests ========================= -These tests are in the file TPLikelihood.py +These tests are in the file TMLikelihood.py Segment Learning Tests [UNIMPLEMENTED] @@ -373,12 +373,12 @@ Multi-attribute sequence tests. -SL1) Train the TP repeatedly using a single (multiple) sequence plus noise. The +SL1) Train the TM repeatedly using a single (multiple) sequence plus noise. The sequence can be relatively short, say 20 patterns. No two consecutive patterns in the sequence should share columns. Add random noise each time a pattern is presented. The noise should be different for each presentation and can be equal to the number of on bits in the pattern. After N iterations of the noisy -sequences, the TP should should achieve perfect inference on the true sequence. +sequences, the TM should achieve perfect inference on the true sequence. There should be resets between each presentation of the sequence. Check predictions in the sequence only.
And test with clean sequences. @@ -412,7 +412,7 @@ Capacity Tests [UNIMPLEMENTED] ============== -These are stress tests that verify that the temporal pooler can learn a large +These are stress tests that verify that the temporal memory can learn a large number of sequences and can predict a large number of possible next steps. Some research needs to be done first to understand the capacity of the system as it relates to the number of columns, cells per column, etc. @@ -424,8 +424,8 @@ Online Learning Tests [UNIMPLEMENTED] ===================== -These tests will verify that the temporal pooler continues to work even if -sequence statistics (and the actual sequences) change slowly over time. The TP +These tests will verify that the temporal memory continues to work even if +sequence statistics (and the actual sequences) change slowly over time. The TM should adapt to the changes and learn to recognize newer sequences (and forget the older sequences?). @@ -441,15 +441,15 @@ import cPickle import pprint -from nupic.research.TP import TP -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.research import fdrutilities as fdrutils #--------------------------------------------------------------------------------- -TEST_CPP_TP = 1 # temporarily disabled until it can be updated +TEST_CPP_TM = 1 # temporarily disabled until it can be updated VERBOSITY = 0 # how chatty the unit tests should be SEED = 33 # the random seed used throughout -TPClass = TP +TMClass = BacktrackingTM checkSynapseConsistency = False rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random @@ -751,9 +751,9 @@ def buildHL0bTrainingSet(numOnes=5): # Basic test (creation, pickling, basic run of learning and inference) def basicTest(): - global TPClass, SEED, VERBOSITY, checkSynapseConsistency + global TMClass, SEED, VERBOSITY, checkSynapseConsistency #-------------------------------------------------------------------------------- - # Create TP object + # Create TM object numberOfCols =10 cellsPerColumn =3 initialPerm =.2 @@ -770,7 +770,7 @@ def basicTest(): seed =SEED verbosity =VERBOSITY - tp = TPClass(numberOfCols, cellsPerColumn, + tm = TMClass(numberOfCols, cellsPerColumn, initialPerm, connectedPerm, minThreshold, newSynapseCount, permanenceInc, permanenceDec, permanenceMax, @@ -784,25 +784,25 @@ def basicTest(): #-------------------------------------------------------------------------------- # Save and reload - pickle.dump(tp, open("test_tp.pkl", "wb")) - tp2 = pickle.load(open("test_tp.pkl")) - - assert tp2.numberOfCols == numberOfCols - assert tp2.cellsPerColumn == cellsPerColumn - print tp2.initialPerm - assert tp2.initialPerm == numpy.float32(.2) - assert tp2.connectedPerm == numpy.float32(.8) - assert tp2.minThreshold == minThreshold - assert tp2.newSynapseCount == newSynapseCount - assert tp2.permanenceInc == numpy.float32(.1) - assert tp2.permanenceDec == numpy.float32(.05) - assert tp2.permanenceMax == 1 - assert tp2.globalDecay == numpy.float32(.05) - assert tp2.activationThreshold == activationThreshold - assert tp2.doPooling == doPooling - assert tp2.segUpdateValidDuration == segUpdateValidDuration - assert tp2.seed == SEED - assert tp2.verbosity == verbosity + pickle.dump(tm, open("test_tm.pkl", "wb")) + tm2 = pickle.load(open("test_tm.pkl")) + + assert tm2.numberOfCols == numberOfCols + assert tm2.cellsPerColumn == cellsPerColumn + print tm2.initialPerm + assert 
tm2.initialPerm == numpy.float32(.2) + assert tm2.connectedPerm == numpy.float32(.8) + assert tm2.minThreshold == minThreshold + assert tm2.newSynapseCount == newSynapseCount + assert tm2.permanenceInc == numpy.float32(.1) + assert tm2.permanenceDec == numpy.float32(.05) + assert tm2.permanenceMax == 1 + assert tm2.globalDecay == numpy.float32(.05) + assert tm2.activationThreshold == activationThreshold + assert tm2.doPooling == doPooling + assert tm2.segUpdateValidDuration == segUpdateValidDuration + assert tm2.seed == SEED + assert tm2.verbosity == verbosity print "Save/load ok" @@ -811,7 +811,7 @@ def basicTest(): for i in xrange(5): xi = rgen.randint(0,2,(numberOfCols)) x = numpy.array(xi, dtype="uint32") - y = tp.learn(x) + y = tm.learn(x) #-------------------------------------------------------------------------------- # Infer @@ -819,20 +819,20 @@ def basicTest(): for i in xrange(10): xi = rgen.randint(0,2,(numberOfCols)) x = numpy.array(xi, dtype="uint32") - y = tp.infer(x) + y = tm.infer(x) if i > 0: - p = tp.checkPrediction2([pattern.nonzero()[0] for pattern in patterns]) + p = tm.checkPrediction2([pattern.nonzero()[0] for pattern in patterns]) print "basicTest ok" #--------------------------------------------------------------------------------- # Figure out acceptable patterns if none were passed to us. -def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable = 1): +def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = 1): """ Tries to infer the set of acceptable patterns for prediction at the given time step and for the given sequence. Acceptable patterns are: the current one, - plus a certain number of patterns after timeStep, in the sequence that the TP + plus a certain number of patterns after timeStep, in the sequence that the TM is currently tracking. Any other pattern is not acceptable. TODO: @@ -842,7 +842,7 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable Parameters: ========== - tp the whole TP, so that we can look at its parameters + tm the whole TM, so that we can look at its parameters t the current time step whichSequence the sequence we are currently tracking trainingSequences all the training sequences @@ -850,7 +850,7 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable we are willing to consider acceptable. In the case of pooling, it is less than or equal to the min of the number of training reps and the segUpdateValidDuration - parameter of the TP, depending on the test case. + parameter of the TM, depending on the test case. The default value is 1, because by default, the pattern after the current one should always be predictable.
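To connect this docstring with the scoring step that appears further down in the diff: at each inference time step the acceptable-pattern window feeds directly into checkPrediction2. The fragment below restates that flow using the variable names visible in the hunks; the closing comment summarizes the bookkeeping (numStrictErrors counts steps with at least one wrong column), while the exact pass/fail tolerances vary per test.

# Inside _testSequence's inference loop (names as in the hunks below),
# at inference time step t on test sequence s:
acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences,
                                            nAcceptable)
scores = tm.checkPrediction2([pattern.nonzero()[0]
                              for pattern in acceptablePatterns])
falsePositives, falseNegatives = scores[0], scores[1]
# Any false positive/negative marks the step as a strict error; a test
# fails only when the accumulated counts exceed its tolerance.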
@@ -863,9 +863,9 @@ def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable # Determine how many steps forward we want to see in the prediction upTo = t + 2 # always predict current and next - # If the TP is pooling, more steps can be predicted - if tp.doPooling: - upTo += min(tp.segUpdateValidDuration, nAcceptable) + # If the TM is pooling, more steps can be predicted + if tm.doPooling: + upTo += min(tm.segUpdateValidDuration, nAcceptable) assert upTo <= len(trainingSequences[whichSequence]) @@ -921,7 +921,7 @@ def _testSequence(trainingSequences, prediction failures, the number of errors, and the number of perfect predictions""" - global TP, SEED, checkSynapseConsistency, VERBOSITY + global BacktrackingTM, SEED, checkSynapseConsistency, VERBOSITY numPerfect = 0 # When every column is correct in the prediction numStrictErrors = 0 # When at least one column is incorrect @@ -933,7 +933,7 @@ def _testSequence(trainingSequences, # override default maxSeqLength value for high-order sequences if highOrder: - tp = TPClass(numberOfCols, cellsPerColumn, + tm = TMClass(numberOfCols, cellsPerColumn, initialPerm, connectedPerm, minThreshold, newSynapseCount, permanenceInc, permanenceDec, permanenceMax, @@ -945,7 +945,7 @@ def _testSequence(trainingSequences, maxSeqLength=0 ) else: - tp = TPClass(numberOfCols, cellsPerColumn, + tm = TMClass(numberOfCols, cellsPerColumn, initialPerm, connectedPerm, minThreshold, newSynapseCount, permanenceInc, permanenceDec, permanenceMax, @@ -959,26 +959,26 @@ def _testSequence(trainingSequences, if compareToPy: # override default maxSeqLength value for high-order sequences if highOrder: - py_tp = TP(numberOfCols, cellsPerColumn, - initialPerm, connectedPerm, - minThreshold, newSynapseCount, - permanenceInc, permanenceDec, permanenceMax, - globalDecay, activationThreshold, - doPooling, segUpdateValidDuration, - seed=SEED, verbosity=verbosity, - pamLength=pamLength, - maxSeqLength=0 - ) + py_tm = BacktrackingTM(numberOfCols, cellsPerColumn, + initialPerm, connectedPerm, + minThreshold, newSynapseCount, + permanenceInc, permanenceDec, permanenceMax, + globalDecay, activationThreshold, + doPooling, segUpdateValidDuration, + seed=SEED, verbosity=verbosity, + pamLength=pamLength, + maxSeqLength=0 + ) else: - py_tp = TP(numberOfCols, cellsPerColumn, - initialPerm, connectedPerm, - minThreshold, newSynapseCount, - permanenceInc, permanenceDec, permanenceMax, - globalDecay, activationThreshold, - doPooling, segUpdateValidDuration, - seed=SEED, verbosity=verbosity, - pamLength=pamLength, - ) + py_tm = BacktrackingTM(numberOfCols, cellsPerColumn, + initialPerm, connectedPerm, + minThreshold, newSynapseCount, + permanenceInc, permanenceDec, permanenceMax, + globalDecay, activationThreshold, + doPooling, segUpdateValidDuration, + seed=SEED, verbosity=verbosity, + pamLength=pamLength, + ) trainingSequences = trainingSequences[0] if testSequences == None: testSequences = trainingSequences @@ -993,9 +993,9 @@ def _testSequence(trainingSequences, if VERBOSITY > 1: print "============= New sequence =================" if doResets: - tp.reset() + tm.reset() if compareToPy: - py_tp.reset() + py_tm.reset() for t,x in enumerate(trainingSequence): if noiseModel is not None and \ 'xor' in noiseModel and 'binomial' in noiseModel \ and 'training' in noiseModel: noise_vector = rgen.binomial(len(x), noiseLevel, (len(x))) x = logical_xor(x, noise_vector) if VERBOSITY > 2: print "Time step",t, "learning round",r, "sequence number", sequenceNum - print "Input: ",tp.printInput(x) + print "Input: 
",tm.printInput(x) print "NNZ:", x.nonzero() x = numpy.array(x).astype('float32') - y = tp.learn(x) + y = tm.learn(x) if compareToPy: - py_y = py_tp.learn(x) + py_y = py_tm.learn(x) if t % 25 == 0: # To track bugs, do that every iteration, but very slow - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True if VERBOSITY > 3: - tp.printStates(printPrevious = (VERBOSITY > 4)) + tm.printStates(printPrevious = (VERBOSITY > 4)) print if VERBOSITY > 3: print "Sequence finished. Complete state after sequence" - tp.printCells() + tm.printCells() print numPerfectAtHub = 0 if compareToPy: print "End of training" - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True #-------------------------------------------------------------------------------- # Infer @@ -1036,9 +1036,9 @@ def _testSequence(trainingSequences, if VERBOSITY > 1: print "============= New sequence =================" if doResets: - tp.reset() + tm.reset() if compareToPy: - py_tp.reset() + py_tm.reset() slen = len(testSequence) @@ -1051,33 +1051,33 @@ def _testSequence(trainingSequences, noise_vector = rgen.binomial(len(x), noiseLevel, (len(x))) x = logical_xor(x, noise_vector) - if VERBOSITY > 2: print "Time step",t, '\nInput:', tp.printInput(x) + if VERBOSITY > 2: print "Time step",t, '\nInput:', tm.printInput(x) x = numpy.array(x).astype('float32') - y = tp.infer(x) + y = tm.infer(x) if compareToPy: - py_y = py_tp.infer(x) - assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True + py_y = py_tm.infer(x) + assert fdrutils.tmDiff(tm, py_tm, VERBOSITY) == True # if t == predJustAfterHubOnly: # z = sum(y, axis = 1) # print '\t\t', # print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z))) - if VERBOSITY > 3: tp.printStates(printPrevious = (VERBOSITY > 4), + if VERBOSITY > 3: tm.printStates(printPrevious = (VERBOSITY > 4), printLearnState = False); print if nMultiStepPrediction > 0: - y_ms = tp.predict(nSteps=nMultiStepPrediction) + y_ms = tm.predict(nSteps=nMultiStepPrediction) if VERBOSITY > 3: print "Multi step prediction at Time step", t for i in range(nMultiStepPrediction): print "Prediction at t+", i+1 - tp.printColConfidence(y_ms[i]) + tm.printColConfidence(y_ms[i]) # Error Checking for i in range(nMultiStepPrediction): @@ -1129,10 +1129,10 @@ def _testSequence(trainingSequences, # nAcceptable is used to reduce the number of automatically determined # acceptable patterns. 
if inferAcceptablePatterns: - acceptablePatterns = findAcceptablePatterns(tp, t, s, testSequences, + acceptablePatterns = findAcceptablePatterns(tm, t, s, testSequences, nAcceptable) - scores = tp.checkPrediction2([pattern.nonzero()[0] \ + scores = tm.checkPrediction2([pattern.nonzero()[0] \ for pattern in acceptablePatterns]) falsePositives, falseNegatives = scores[0], scores[1] @@ -1171,7 +1171,7 @@ def _testSequence(trainingSequences, print '\t\t',; printOneTrainingVector(p) print 'Output' diagnostic = '' - output = sum(tp.currentOutput,axis=1) + output = sum(tm.currentOutput,axis=1) print '\t\t',; printOneTrainingVector(output) else: @@ -1181,9 +1181,9 @@ def _testSequence(trainingSequences, numPerfectAtHub += 1 if predJustAfterHubOnly is None: - return numFailures, numStrictErrors, numPerfect, tp + return numFailures, numStrictErrors, numPerfect, tm else: - return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tp + return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tm @@ -1206,7 +1206,7 @@ def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1251,7 +1251,7 @@ def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = 4, numberOfCols = numCols, @@ -1303,7 +1303,7 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): minOnes = 15, maxOnes = 20) # Do one pass through the training set - numFailures1, numStrictErrors1, numPerfect1, tp1 = \ + numFailures1, numStrictErrors1, numPerfect1, tm1 = \ _testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1319,7 +1319,7 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): activationThreshold = 8) # Do two passes through the training set - numFailures, numStrictErrors, numPerfect, tp2 = \ + numFailures, numStrictErrors, numPerfect, tm2 = \ _testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1335,8 +1335,8 @@ def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"): activationThreshold = 8) # Check that training with a second pass did not result in more synapses - segmentInfo1 = tp1.getSegmentInfo() - segmentInfo2 = tp2.getSegmentInfo() + segmentInfo1 = tm1.getSegmentInfo() + segmentInfo2 = tm2.getSegmentInfo() if (segmentInfo1[0] != segmentInfo2[0]) or \ (segmentInfo1[1] != segmentInfo2[1]) : print "Training twice incorrectly resulted in more segments or synapses" @@ -1375,7 +1375,7 @@ def TestB3(numUniquePatterns, nTests): numCols = numCols, minOnes = 15, maxOnes = 20) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1411,7 +1411,7 @@ def TestH0(numOnes = 5,nMultiStepPrediction=0): trainingSet = buildSimpleTrainingSet(numOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = 20, numberOfCols = trainingSet[0][0][0].size, @@ -1465,7 +1465,7 @@ def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2], numCols = numCols, minOnes = 21, maxOnes = 25) 
- numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1510,7 +1510,7 @@ def TestH11(numOnes = 3): trainingSet = buildAlternatingTrainingSet(numOnes= 3) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = 1, numberOfCols = trainingSet[0][0][0].size, @@ -1573,7 +1573,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 10 ======================" - numFailures3, numStrictErrors3, numPerfect3, tp3 = \ + numFailures3, numStrictErrors3, numPerfect3, tm3 = \ _testSequence(trainingSet, nTrainingReps = 10, numberOfCols = numCols, @@ -1592,7 +1592,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 2 ======================" - numFailures, numStrictErrors, numPerfect, tp2 = \ + numFailures, numStrictErrors, numPerfect, tm2 = \ _testSequence(trainingSet, nTrainingReps = 2, numberOfCols = numCols, @@ -1611,7 +1611,7 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 print "============== 1 ======================" - numFailures1, numStrictErrors1, numPerfect1, tp1 = \ + numFailures1, numStrictErrors1, numPerfect1, tm1 = \ _testSequence(trainingSet, nTrainingReps = 1, numberOfCols = numCols, @@ -1629,16 +1629,16 @@ def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2 shouldFail = shouldFail) # Check that training with a second pass did not result in more synapses - segmentInfo1 = tp1.getSegmentInfo() - segmentInfo2 = tp2.getSegmentInfo() + segmentInfo1 = tm1.getSegmentInfo() + segmentInfo2 = tm2.getSegmentInfo() if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \ (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) : print "Training twice incorrectly resulted in too many segments or synapses" print segmentInfo1 print segmentInfo2 - print tp3.getSegmentInfo() - tp3.trimSegments() - print tp3.getSegmentInfo() + print tm3.getSegmentInfo() + tm3.trimSegments() + print tm3.getSegmentInfo() print "Failures for 1, 2, and N reps" print numFailures1, numStrictErrors1, numPerfect1 @@ -1694,7 +1694,7 @@ def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2], numCols = numCols, minOnes = minOnes, maxOnes = maxOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1736,7 +1736,7 @@ def TestHL0a(numOnes = 5): trainingSet, testSet = buildHL0aTrainingSet() numCols = trainingSet[0][0].size - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence([trainingSet], nTrainingReps = 1, numberOfCols = numCols, @@ -1754,8 +1754,8 @@ def TestHL0a(numOnes = 5): doPooling = False, testSequences = testSet) - tp.trimSegments() - retAfter = tp.getSegmentInfo() + tm.trimSegments() + retAfter = tm.getSegmentInfo() print retAfter[0], retAfter[1] if retAfter[0] > 20: print "Too many segments" @@ -1788,7 +1788,7 @@ def TestHL0b(numOnes = 5): numCols = trainingSet[0][0].size print "numCols=", numCols - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence([trainingSet], nTrainingReps = 1, numberOfCols = numCols, @@ -1805,9 +1805,9 @@ def TestHL0b(numOnes = 5): doPooling = 
False, testSequences = testSet) - tp.trimSegments() - retAfter = tp.getSegmentInfo() - tp.printCells() + tm.trimSegments() + retAfter = tm.getSegmentInfo() + tm.printCells() if numFailures == 0: print "Test HL0 ok" @@ -1856,7 +1856,7 @@ def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2] numCols = numCols, minOnes = minOnes, maxOnes = maxOnes) - numFailures, numStrictErrors, numPerfect, tp = \ + numFailures, numStrictErrors, numPerfect, tm = \ _testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1908,7 +1908,7 @@ def worker(x): numCols = numCols, minOnes = 21, maxOnes = 25) - numFailures1, numStrictErrors1, numPerfect1, atHub, tp = \ + numFailures1, numStrictErrors1, numPerfect1, atHub, tm = \ _testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -1934,7 +1934,7 @@ def worker(x): numCols = numCols, minOnes = 21, maxOnes = 25) - numFailures2, numStrictErrors2, numPerfect2, tp = \ + numFailures2, numStrictErrors2, numPerfect2, tm = \ _testSequence(trainingSet, nTrainingReps = nTrainingReps, numberOfCols = numCols, @@ -2257,9 +2257,9 @@ def runTests(testLength = "short"): if __name__=="__main__": - if not TEST_CPP_TP: + if not TEST_CPP_TM: print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" - print "!! WARNING: C++ TP testing is DISABLED until it can be updated." + print "!! WARNING: C++ TM testing is DISABLED until it can be updated." print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # Three different test lengths are passed in through the command line. @@ -2281,7 +2281,7 @@ def runTests(testLength = "short"): if 'verbosity' in arg: VERBOSITY = int(sys.argv[i+1]) if 'help' in arg: - print "TPTest.py --short|long --seed number|'rand' --verbosity number" + print "TMTest.py --short|long --seed number|'rand' --verbosity number" sys.exit() if "short" in arg: testLength = "short" @@ -2302,13 +2302,13 @@ def runTests(testLength = "short"): numUniquePatterns = 100 nTests = 3 - print "TP tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests, + print "TM tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests, print "seed=", SEED print if testLength == "long": - print 'Testing Python TP' - TPClass = TP + print 'Testing Python TM' + TMClass = BacktrackingTM runTests(testLength) if testLength != 'long': @@ -2318,7 +2318,7 @@ def runTests(testLength = "short"): # Temporarily turned off so we can investigate checkSynapseConsistency = False - if TEST_CPP_TP: - print 'Testing C++ TP' - TPClass = TP10X2 + if TEST_CPP_TM: + print 'Testing C++ TM' + TMClass = BacktrackingTMCPP runTests(testLength) diff --git a/tests/integration/nupic/engine/network_creation_common.py b/tests/integration/nupic/engine/network_creation_common.py index b8ae1a2ddd..ee8d9cb20f 100755 --- a/tests/integration/nupic/engine/network_creation_common.py +++ b/tests/integration/nupic/engine/network_creation_common.py @@ -31,7 +31,7 @@ from nupic.encoders import MultiEncoder, ScalarEncoder, DateEncoder from nupic.regions.RecordSensor import RecordSensor from nupic.regions.SPRegion import SPRegion -from nupic.regions.TPRegion import TPRegion +from nupic.regions.TMRegion import TMRegion try: import capnp @@ -65,8 +65,8 @@ "boostStrength": 0.0, } -# Config field for TPRegion -TP_PARAMS = { +# Config field for TMRegion +TM_PARAMS = { "verbosity": _VERBOSITY, "columnCount": 2048, "cellsPerColumn": 32, @@ -109,7 +109,7 @@ def createNetwork(dataSource, 
enableTP=False, temporalImp="py"): The network has a sensor region reading data from `dataSource` and passing the encoded representation to an SPRegion. The SPRegion output is passed to - a TPRegion. + a TMRegion. :param dataSource: a RecordStream instance to get data from :returns: a Network instance ready to run @@ -141,10 +141,10 @@ def createNetwork(dataSource, enableTP=False, temporalImp="py"): srcOutput="temporalTopDownOut", destInput="temporalTopDownIn") if enableTP: - # Add the TPRegion on top of the SPRegion - TP_PARAMS["temporalImp"] = temporalImp - network.addRegion("temporalPoolerRegion", "py.TPRegion", - json.dumps(TP_PARAMS)) + # Add the TMRegion on top of the SPRegion + TM_PARAMS["temporalImp"] = temporalImp + network.addRegion("temporalPoolerRegion", "py.TMRegion", + json.dumps(TM_PARAMS)) network.link("spatialPoolerRegion", "temporalPoolerRegion", "UniformLink", "") network.link("temporalPoolerRegion", "spatialPoolerRegion", "UniformLink", "", diff --git a/tests/integration/nupic/engine/temporal_memory_compatibility_test.py b/tests/integration/nupic/engine/temporal_memory_compatibility_test.py index a0406873e6..43f7cd3bf2 100755 --- a/tests/integration/nupic/engine/temporal_memory_compatibility_test.py +++ b/tests/integration/nupic/engine/temporal_memory_compatibility_test.py @@ -23,7 +23,7 @@ import unittest import numpy -from nupic.regions.TPRegion import TPRegion +from nupic.regions.TMRegion import TMRegion from network_creation_common import createAndRunNetwork @@ -35,12 +35,12 @@ def testTMPyCpp(self): """ Test compatibility between C++ and Python TM implementation. """ - results1 = createAndRunNetwork(TPRegion, + results1 = createAndRunNetwork(TMRegion, "bottomUpOut", checkpointMidway=False, temporalImp="tm_cpp") - results2 = createAndRunNetwork(TPRegion, + results2 = createAndRunNetwork(TMRegion, "bottomUpOut", checkpointMidway=False, temporalImp="tm_py") diff --git a/tests/integration/nupic/opf/expgenerator_test.py b/tests/integration/nupic/opf/expgenerator_test.py index df20f82aa0..175b5acf86 100755 --- a/tests/integration/nupic/opf/expgenerator_test.py +++ b/tests/integration/nupic/opf/expgenerator_test.py @@ -1059,7 +1059,7 @@ def test_MultiStep(self): self.assertEqual(perms.inputPredictedField, "auto") - # Should have TP parameters being permuted + # Should have TM parameters being permuted self.assertIn('activationThreshold', perms.permutations['modelParams']['tmParams']) self.assertIn('minThreshold', perms.permutations['modelParams']['tmParams']) @@ -1091,7 +1091,7 @@ def test_MultiStep(self): # -------------------------------------- - # If we specify NonTemporal, we shouldn't permute over TP parameters + # If we specify NonTemporal, we shouldn't permute over TM parameters expDesc2 = copy.deepcopy(expDesc) expDesc2['inferenceType'] = 'NontemporalMultiStep' (base, perms) = self.getModules(expDesc2) @@ -1795,7 +1795,7 @@ def test_NontemporalClassification(self): base.config['modelParams']['sensorParams']['encoders'].keys()) - # The SP and TP should both be disabled + # The SP and TM should both be disabled self.assertFalse(base.config['modelParams']['spEnable']) self.assertFalse(base.config['modelParams']['tmEnable']) @@ -1806,7 +1806,7 @@ def test_NontemporalClassification(self): + "steps=\\[0\\]:window=1000:field=consumption") self.assertIn('alpha', perms.permutations['modelParams']['clParams']) - # Should have no SP or TP params to permute over + # Should have no SP or TM params to permute over self.assertEqual(perms.permutations['modelParams']['tmParams'], 
{}) self.assertEqual(perms.permutations['modelParams']['spParams'], {}) diff --git a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/a/savedmodels_2012-10-05/DefaultTask.nta/modelextradata/TemporalAnomaly-network.nta/network.yaml b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/a/savedmodels_2012-10-05/DefaultTask.nta/modelextradata/TemporalAnomaly-network.nta/network.yaml index 64d29f5a25..f11d99b889 100644 --- a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/a/savedmodels_2012-10-05/DefaultTask.nta/modelextradata/TemporalAnomaly-network.nta/network.yaml +++ b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/a/savedmodels_2012-10-05/DefaultTask.nta/modelextradata/TemporalAnomaly-network.nta/network.yaml @@ -14,8 +14,8 @@ Regions: phases: - 1 label: R1 - - name: TP - nodeType: py.TPRegion + - name: TM + nodeType: py.TMRegion dimensions: - 1 phases: @@ -62,7 +62,7 @@ Links: destInput: resetIn - type: UniformLink params: "" - srcRegion: TP + srcRegion: TM srcOutput: topDownOut destRegion: SP destInput: topDownIn @@ -70,17 +70,17 @@ Links: params: "" srcRegion: SP srcOutput: bottomUpOut - destRegion: TP + destRegion: TM destInput: bottomUpIn - type: UniformLink params: "" srcRegion: sensor srcOutput: resetOut - destRegion: TP + destRegion: TM destInput: resetIn - type: UniformLink params: "" - srcRegion: TP + srcRegion: TM srcOutput: bottomUpOut destRegion: Classifier destInput: bottomUpIn @@ -98,13 +98,13 @@ Links: destInput: spBottomUpOut - type: UniformLink params: "" - srcRegion: TP + srcRegion: TM srcOutput: lrnActiveStateT destRegion: AnomalyClassifier destInput: tpLrnActiveStateT - type: UniformLink params: "" - srcRegion: TP + srcRegion: TM srcOutput: topDownOut destRegion: AnomalyClassifier destInput: tpTopDownOut \ No newline at end of file diff --git a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py index 345986fcd0..b476366e7c 100644 --- a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py +++ b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/backwards_compatibility/base.py @@ -175,7 +175,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -201,7 +201,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -210,20 +210,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -248,7 +248,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -257,7 +257,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -293,7 +293,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py index 860459f855..1a9699cd1c 100644 --- a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py +++ b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/non_temporal_multi_step/base.py @@ -175,7 +175,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -201,7 +201,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -210,20 +210,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -248,7 +248,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -257,7 +257,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -293,7 +293,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py index 510618272d..4838789c40 100644 --- a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py +++ b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_anomaly/base.py @@ -175,7 +175,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -201,7 +201,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -210,20 +210,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -248,7 +248,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -257,7 +257,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -293,7 +293,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py index 345986fcd0..b476366e7c 100644 --- a/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py +++ b/tests/integration/nupic/opf/opf_checkpoint_test/experiments/temporal_multi_step/base.py @@ -175,7 +175,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -201,7 +201,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -210,20 +210,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -248,7 +248,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -257,7 +257,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -293,7 +293,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py b/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py index 059fba27d5..8145e76d18 100644 --- a/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py +++ b/tests/integration/nupic/opf/opf_description_template_test/experiments/gym/base.py @@ -150,7 +150,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -176,7 +176,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -185,20 +185,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -223,7 +223,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -232,7 +232,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -268,7 +268,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py b/tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py index f17ec5c666..6b3d4b5f66 100755 --- a/tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py +++ b/tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py @@ -224,7 +224,7 @@ def test_sub_experiment_override(self): expectedValue = 12 self.assertEqual(tpActivationThreshold, expectedValue, - "Expected tp activationThreshold=%s, but got %s" % ( + "Expected tm activationThreshold=%s, but got %s" % ( expectedValue, tpActivationThreshold)) diff --git a/tests/integration/nupic/opf/opf_experiment_results_test.py b/tests/integration/nupic/opf/opf_experiment_results_test.py index b9e265d75f..d89ad318d6 100755 --- a/tests/integration/nupic/opf/opf_experiment_results_test.py +++ b/tests/integration/nupic/opf/opf_experiment_results_test.py @@ -205,7 +205,7 @@ def testExperimentResults(self): } }, - { 'experimentDir': 'experiments/classification/category_TP_0', + { 'experimentDir': 'experiments/classification/category_TM_0', 'results': { ('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.045), @@ -215,7 +215,7 @@ def testExperimentResults(self): } }, - { 'experimentDir': 'experiments/classification/category_TP_1', + { 'experimentDir': 'experiments/classification/category_TM_1', 'results': { ('OnlineLearning.TemporalClassification.predictionLog.csv', 'classification:avg_err:window=200'): (0.0, 0.005), diff --git a/tests/integration/nupic/opf/opf_region_test.py b/tests/integration/nupic/opf/opf_region_test.py index 3bdd60789d..dc2248cd9d 100755 --- a/tests/integration/nupic/opf/opf_region_test.py +++ b/tests/integration/nupic/opf/opf_region_test.py @@ -20,7 +20,7 @@ # ---------------------------------------------------------------------- """ -This test ensures that SPRegion and TPRegion are working as expected. 
It runs a +This test ensures that SPRegion and TMRegion are working as expected. It runs a number of tests: 1: testSaveAndReload -- tests that a saved and reloaded network behaves the same @@ -36,7 +36,7 @@ Test N: test that all the parameters of an SP region work properly -Test N: test that all the parameters of a TP region work properly +Test N: test that all the parameters of a TM region work properly """ @@ -55,9 +55,9 @@ from nupic.support.unittesthelpers.testcasebase import TestCaseBase from nupic.bindings.algorithms import SpatialPooler -from nupic.research.TP10X2 import TP10X2 +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP from nupic.regions.SPRegion import SPRegion -from nupic.regions.TPRegion import TPRegion +from nupic.regions.TMRegion import TMRegion _VERBOSITY = 0 # how chatty the unit tests should be _SEED = 35 # the random seed used throughout @@ -85,7 +85,7 @@ def _initConfigDicts(): ) # ============================================================================ - # Config field for TPRegion + # Config field for TMRegion global g_tpRegionConfig # pylint: disable=W0603 g_tpRegionConfig = dict( verbosity = _VERBOSITY, @@ -139,7 +139,7 @@ def _createEncoder(): def _createOPFNetwork(addSP = True, addTP = False): """Create a 'new-style' network ala OPF and return it. If addSP is true, an SPRegion will be added named 'level1SP'. - If addTP is true, a TPRegion will be added named 'level1TP' + If addTP is true, a TMRegion will be added named 'level1TP' """ # ========================================================================== @@ -176,11 +176,11 @@ def _createOPFNetwork(addSP = True, addTP = False): # ========================================================================== if addTP and addSP: - # Add the TP on top of SP if requested - # The input width of the TP is set to the column count of the SP - print "Adding TPRegion on top of SP" + # Add the TM on top of SP if requested + # The input width of the TM is set to the column count of the SP + print "Adding TMRegion on top of SP" g_tpRegionConfig['inputWidth'] = g_spRegionConfig['columnCount'] - n.addRegion("level1TP", "py.TPRegion", json.dumps(g_tpRegionConfig)) + n.addRegion("level1TP", "py.TMRegion", json.dumps(g_tpRegionConfig)) n.link("level1SP", "level1TP", "UniformLink", "") n.link("level1TP", "level1SP", "UniformLink", "", srcOutput="topDownOut", destInput="topDownIn") @@ -188,11 +188,11 @@ def _createOPFNetwork(addSP = True, addTP = False): srcOutput="resetOut", destInput="resetIn") elif addTP: - # Add a lone TPRegion if requested - # The input width of the TP is set to the encoder width - print "Adding TPRegion" + # Add a lone TMRegion if requested + # The input width of the TM is set to the encoder width + print "Adding TMRegion" g_tpRegionConfig['inputWidth'] = encoder.getWidth() - n.addRegion("level1TP", "py.TPRegion", json.dumps(g_tpRegionConfig)) + n.addRegion("level1TP", "py.TMRegion", json.dumps(g_tpRegionConfig)) n.link("sensor", "level1TP", "UniformLink", "") n.link("sensor", "level1TP", "UniformLink", "", @@ -269,9 +269,9 @@ def testMaxEnabledPhase(self): level1SP.setParameter('learningMode', 1) level1SP.setParameter('inferenceMode', 0) - tp = netOPF.regions['level1TP'] - tp.setParameter('learningMode', 0) - tp.setParameter('inferenceMode', 0) + tm = netOPF.regions['level1TP'] + tm.setParameter('learningMode', 0) + tm.setParameter('inferenceMode', 0) print "maxPhase,maxEnabledPhase = ", netOPF.maxPhase, \ netOPF.getMaxEnabledPhase() @@ -327,7 +327,7 @@ def 
testGetAlgorithmOnRegions(self): network.run(1) spRegions = network.getRegionsByType(SPRegion) - tpRegions = network.getRegionsByType(TPRegion) + tpRegions = network.getRegionsByType(TMRegion) self.assertEqual(len(spRegions), 1) self.assertEqual(len(tpRegions), 1) @@ -336,10 +336,10 @@ def testGetAlgorithmOnRegions(self): tpRegion = tpRegions[0] sp = spRegion.getSelf().getAlgorithmInstance() - tp = tpRegion.getSelf().getAlgorithmInstance() + tm = tpRegion.getSelf().getAlgorithmInstance() self.assertEqual(type(sp), SpatialPooler) - self.assertEqual(type(tp), TP10X2) + self.assertEqual(type(tm), BacktrackingTMCPP) diff --git a/tests/swarming/nupic/swarming/experiments/delta/description.py b/tests/swarming/nupic/swarming/experiments/delta/description.py index 86c38d6e05..233a550277 100644 --- a/tests/swarming/nupic/swarming/experiments/delta/description.py +++ b/tests/swarming/nupic/swarming/experiments/delta/description.py @@ -166,7 +166,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -192,7 +192,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -201,20 +201,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -239,7 +239,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -248,7 +248,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -284,7 +284,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 
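The opf_region_test.py changes above swap both the type used for region lookup and the expected algorithm-instance class. A short sketch of that lookup pattern, assuming a `network` already built with an SPRegion and a TMRegion (as `_createOPFNetwork` does in the test):

# Sketch of the lookup pattern exercised by testGetAlgorithmOnRegions,
# assuming `network` was built with both an SP and a TM region.
from nupic.regions.SPRegion import SPRegion
from nupic.regions.TMRegion import TMRegion

spRegion = network.getRegionsByType(SPRegion)[0]
tmRegion = network.getRegionsByType(TMRegion)[0]

# getSelf() returns the Python region instance; getAlgorithmInstance()
# returns the underlying algorithm (BacktrackingTMCPP after this rename).
tm = tmRegion.getSelf().getAlgorithmInstance()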
diff --git a/tests/swarming/nupic/swarming/experiments/dummyV2/description.py b/tests/swarming/nupic/swarming/experiments/dummyV2/description.py index 9863ddd0d6..17debdd5d6 100644 --- a/tests/swarming/nupic/swarming/experiments/dummyV2/description.py +++ b/tests/swarming/nupic/swarming/experiments/dummyV2/description.py @@ -180,7 +180,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -206,7 +206,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -215,20 +215,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -253,7 +253,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -262,7 +262,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -298,7 +298,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py b/tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py index e91954df54..bc99e29f4f 100644 --- a/tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py +++ b/tests/swarming/nupic/swarming/experiments/dummy_multi_v2/description.py @@ -179,7 +179,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -205,7 +205,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. 
- # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -214,20 +214,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -252,7 +252,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -261,7 +261,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -297,7 +297,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/field_contrib_temporal/description.py b/tests/swarming/nupic/swarming/experiments/field_contrib_temporal/description.py index 8ef13e8adc..663988774e 100644 --- a/tests/swarming/nupic/swarming/experiments/field_contrib_temporal/description.py +++ b/tests/swarming/nupic/swarming/experiments/field_contrib_temporal/description.py @@ -179,7 +179,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -205,7 +205,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -214,20 +214,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -252,7 +252,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -261,7 +261,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -297,7 +297,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/field_threshold_temporal/description.py b/tests/swarming/nupic/swarming/experiments/field_threshold_temporal/description.py index 56898be49d..0db0d1b14a 100644 --- a/tests/swarming/nupic/swarming/experiments/field_threshold_temporal/description.py +++ b/tests/swarming/nupic/swarming/experiments/field_threshold_temporal/description.py @@ -192,7 +192,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -218,7 +218,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -227,20 +227,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -265,7 +265,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -274,7 +274,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -310,7 +310,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py b/tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py index 015734f7cf..ece9bfc2b5 100644 --- a/tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py +++ b/tests/swarming/nupic/swarming/experiments/input_predicted_field/description.py @@ -159,7 +159,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -185,7 +185,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -194,20 +194,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -232,7 +232,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -241,7 +241,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -277,7 +277,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/description.py b/tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/description.py index 01555e4960..5c421155ab 100644 --- a/tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/description.py +++ b/tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/description.py @@ -139,7 +139,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -165,7 +165,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -174,20 +174,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -212,7 +212,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -221,7 +221,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -257,7 +257,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/max_branching_temporal/description.py b/tests/swarming/nupic/swarming/experiments/max_branching_temporal/description.py index 8547915420..87959c9ccf 100644 --- a/tests/swarming/nupic/swarming/experiments/max_branching_temporal/description.py +++ b/tests/swarming/nupic/swarming/experiments/max_branching_temporal/description.py @@ -192,7 +192,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -218,7 +218,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -227,20 +227,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -265,7 +265,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -274,7 +274,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -310,7 +310,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/oneField/description.py b/tests/swarming/nupic/swarming/experiments/oneField/description.py index 298460b6b6..a73ffd1552 100644 --- a/tests/swarming/nupic/swarming/experiments/oneField/description.py +++ b/tests/swarming/nupic/swarming/experiments/oneField/description.py @@ -164,7 +164,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -190,7 +190,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -199,20 +199,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -237,7 +237,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -246,7 +246,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -282,7 +282,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/simpleV2/description.py b/tests/swarming/nupic/swarming/experiments/simpleV2/description.py index de72c136f4..77e6663869 100644 --- a/tests/swarming/nupic/swarming/experiments/simpleV2/description.py +++ b/tests/swarming/nupic/swarming/experiments/simpleV2/description.py @@ -180,7 +180,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -206,7 +206,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -215,20 +215,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -253,7 +253,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -262,7 +262,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -298,7 +298,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py b/tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py index 4a79ca4d41..a6c86c0981 100644 --- a/tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py +++ b/tests/swarming/nupic/swarming/experiments/simple_cla_multistep/description.py @@ -147,7 +147,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -173,7 +173,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -182,20 +182,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -220,7 +220,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -229,7 +229,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -265,7 +265,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/smart_speculation_spatial_classification/description.py b/tests/swarming/nupic/swarming/experiments/smart_speculation_spatial_classification/description.py index f550e964ec..f08b6a3656 100644 --- a/tests/swarming/nupic/swarming/experiments/smart_speculation_spatial_classification/description.py +++ b/tests/swarming/nupic/swarming/experiments/smart_speculation_spatial_classification/description.py @@ -198,7 +198,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -224,7 +224,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -233,20 +233,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -271,7 +271,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -280,7 +280,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -316,7 +316,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py b/tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py index 108dce8256..edad8d2ffb 100644 --- a/tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py +++ b/tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py @@ -197,7 +197,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -223,7 +223,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -232,20 +232,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : True, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -270,7 +270,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -279,7 +279,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -315,7 +315,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/swarming/nupic/swarming/experiments/spatial_classification/description.py b/tests/swarming/nupic/swarming/experiments/spatial_classification/description.py index 89f4d7df56..1f85ab9f52 100644 --- a/tests/swarming/nupic/swarming/experiments/spatial_classification/description.py +++ b/tests/swarming/nupic/swarming/experiments/spatial_classification/description.py @@ -191,7 +191,7 @@ 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -217,7 +217,7 @@ # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. - # (This concept applies to both SP and TP and so 'cells' + # (This concept applies to both SP and TM and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, @@ -226,20 +226,20 @@ 'synPermInactiveDec': 0.01, }, - # Controls whether TP is enabled or disabled; - # TP is necessary for making temporal predictions, such as predicting - # the next inputs. Without TP, the model is only capable of + # Controls whether TM is enabled or disabled; + # TM is necessary for making temporal predictions, such as predicting + # the next inputs. Without TM, the model is only capable of # reconstructing missing sensor inputs (via SP). 
'tmEnable' : False, 'tmParams': { - # TP diagnostic output verbosity control; + # TM diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity - # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) + # (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for - # SP and TP) + # SP and TM) # (see also tpNCellsPerCol) 'columnCount': 2048, @@ -264,7 +264,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, @@ -273,7 +273,7 @@ # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # - # TODO: for Ron: once the appropriate value is placed in TP + # TODO: for Ron: once the appropriate value is placed in TM # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, @@ -309,7 +309,7 @@ 'outputType': 'normal', - # "Pay Attention Mode" length. This tells the TP how many new + # "Pay Attention Mode" length. This tells the TM how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. diff --git a/tests/unit/nupic/algorithms/tm_cpp_test.py b/tests/unit/nupic/algorithms/tm_cpp_test.py new file mode 100755 index 0000000000..28016cdbd5 --- /dev/null +++ b/tests/unit/nupic/algorithms/tm_cpp_test.py @@ -0,0 +1,371 @@ +# ---------------------------------------------------------------------- +# Numenta Platform for Intelligent Computing (NuPIC) +# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement +# with Numenta, Inc., for a separate license for this software code, the +# following terms and conditions apply: +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero Public License version 3 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the GNU Affero Public License for more details. +# +# You should have received a copy of the GNU Affero Public License +# along with this program. If not, see http://www.gnu.org/licenses. 
+# +# http://numenta.org/licenses/ +# ---------------------------------------------------------------------- + +"""Tests for the C++ implementation of the temporal memory.""" + +import cPickle as pickle +import unittest2 as unittest + +import numpy + +from nupic.bindings.math import Random +from nupic.research import fdrutilities as fdrutils +from nupic.research.BacktrackingTM import BacktrackingTM +from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP + +VERBOSITY = 0 # how chatty the unit tests should be +INFERENCE_VERBOSITY = 0 # Chattiness during inference test +SEED = 12 +_RGEN = Random(SEED) + + + +def checkCell0(tm): + """Check that cell 0 has no incoming segments""" + for c in range(tm.numberOfCols): + assert tm.getNumSegmentsInCell(c, 0) == 0 + + + +def setVerbosity(verbosity, tm, tmPy): + """Set verbosity levels of the TM's""" + tm.cells4.setVerbosity(verbosity) + tm.verbosity = verbosity + tmPy.verbosity = verbosity + + + +class BacktrackingTMCPP2Test(unittest.TestCase): + + + def basicTest(self): + """Basic test (creation, pickling, basic run of learning and inference)""" + # Create TM object + tm = BacktrackingTMCPP(numberOfCols=10, cellsPerColumn=3, + initialPerm=.2, connectedPerm= 0.8, + minThreshold=2, newSynapseCount=5, + permanenceInc=.1, permanenceDec= .05, + permanenceMax=1, globalDecay=.05, + activationThreshold=4, doPooling=False, + segUpdateValidDuration=5, seed=SEED, + verbosity=VERBOSITY) + tm.retrieveLearningStates = True + + # Save and reload + tm.makeCells4Ephemeral = False + pickle.dump(tm, open("test_tm_cpp.pkl", "wb")) + tm2 = pickle.load(open("test_tm_cpp.pkl")) + + self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False)) + + # Learn + for i in xrange(5): + x = numpy.zeros(tm.numberOfCols, dtype='uint32') + _RGEN.initializeUInt32Array(x, 2) + tm.learn(x) + + # Save and reload after learning + tm.reset() + tm.makeCells4Ephemeral = False + pickle.dump(tm, open("test_tm_cpp.pkl", "wb")) + tm2 = pickle.load(open("test_tm_cpp.pkl")) + self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY)) + + ## Infer + patterns = numpy.zeros((4, tm.numberOfCols), dtype='uint32') + for i in xrange(4): + _RGEN.initializeUInt32Array(patterns[i], 2) + + for i in xrange(10): + x = numpy.zeros(tm.numberOfCols, dtype='uint32') + _RGEN.initializeUInt32Array(x, 2) + tm.infer(x) + if i > 0: + tm.checkPrediction2(patterns) + + + def basicTest2(self, tm, numPatterns=100, numRepetitions=3, activity=15, + testTrimming=False, testRebuild=False): + """Basic test (basic run of learning and inference)""" + # Create PY TM object that mirrors the one sent in. 
+ tmPy = BacktrackingTM(numberOfCols=tm.numberOfCols, + cellsPerColumn=tm.cellsPerColumn, + initialPerm=tm.initialPerm, + connectedPerm=tm.connectedPerm, + minThreshold=tm.minThreshold, + newSynapseCount=tm.newSynapseCount, + permanenceInc=tm.permanenceInc, + permanenceDec=tm.permanenceDec, + permanenceMax=tm.permanenceMax, + globalDecay=tm.globalDecay, + activationThreshold=tm.activationThreshold, + doPooling=tm.doPooling, + segUpdateValidDuration=tm.segUpdateValidDuration, + pamLength=tm.pamLength, maxAge=tm.maxAge, + maxSeqLength=tm.maxSeqLength, + maxSegmentsPerCell=tm.maxSegmentsPerCell, + maxSynapsesPerSegment=tm.maxSynapsesPerSegment, + seed=tm.seed, verbosity=tm.verbosity) + + # Ensure we are copying over learning states for TMDiff + tm.retrieveLearningStates = True + + verbosity = VERBOSITY + + # Learn + + # Build up sequences + sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns, + length=tm.numberOfCols, + activity=activity) + for r in xrange(numRepetitions): + for i in xrange(sequence.nRows()): + + #if i > 11: + # setVerbosity(6, tm, tmPy) + + if i % 10 == 0: + tm.reset() + tmPy.reset() + + if verbosity >= 2: + print "\n\n ===================================\nPattern:", + print i, "Round:", r, "input:", sequence.getRow(i) + + y1 = tm.learn(sequence.getRow(i)) + y2 = tmPy.learn(sequence.getRow(i)) + + # Ensure everything continues to work well even if we continuously + # rebuild outSynapses structure + if testRebuild: + tm.cells4.rebuildOutSynapses() + + if testTrimming: + tm.trimSegments() + tmPy.trimSegments() + + if verbosity > 2: + print "\n ------ CPP states ------ ", + tm.printStates() + print "\n ------ PY states ------ ", + tmPy.printStates() + if verbosity > 6: + print "C++ cells: " + tm.printCells() + print "PY cells: " + tmPy.printCells() + + if verbosity >= 3: + print "Num segments in PY and C++", tmPy.getNumSegments(), \ + tm.getNumSegments() + + # Check if the two TM's are identical or not. This check is slow so + # we do it every other iteration. Make it every iteration for debugging + # as needed. 
+ self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity, False)) + + # Check that outputs are identical + self.assertLess(abs((y1 - y2).sum()), 3) + + print "Learning completed" + + self.assertTrue(fdrutils.tmDiff2(tm, tmPy, verbosity)) + + # TODO: Need to check - currently failing this + #checkCell0(tmPy) + + # Remove unconnected synapses and check TM's again + + # Test rebuild out synapses + print "Rebuilding outSynapses" + tm.cells4.rebuildOutSynapses() + self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY)) + + print "Trimming segments" + tm.trimSegments() + tmPy.trimSegments() + self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY)) + + # Save and reload after learning + print "Pickling and unpickling" + tm.makeCells4Ephemeral = False + pickle.dump(tm, open("test_tm_cpp.pkl", "wb")) + tm2 = pickle.load(open("test_tm_cpp.pkl")) + self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False)) + + # Infer + print "Testing inference" + + # Setup for inference + tm.reset() + tmPy.reset() + setVerbosity(INFERENCE_VERBOSITY, tm, tmPy) + + patterns = numpy.zeros((40, tm.numberOfCols), dtype='uint32') + for i in xrange(4): + _RGEN.initializeUInt32Array(patterns[i], 2) + + for i, x in enumerate(patterns): + + x = numpy.zeros(tm.numberOfCols, dtype='uint32') + _RGEN.initializeUInt32Array(x, 2) + y = tm.infer(x) + yPy = tmPy.infer(x) + + self.assertTrue(fdrutils.tmDiff2(tm, tmPy, VERBOSITY, checkLearn=False)) + if abs((y - yPy).sum()) > 0: + print "C++ output", y + print "Py output", yPy + assert False + + if i > 0: + tm.checkPrediction2(patterns) + tmPy.checkPrediction2(patterns) + + print "Inference completed" + print "====================================" + + return tm, tmPy + + + def testTMs(self, short=True): + """Call basicTest2 with multiple parameter settings and ensure the C++ and + PY versions are identical throughout.""" + + if short == True: + print "Testing short version" + else: + print "Testing long version" + + if short: + print "\nTesting with fixed resource CLA - test max segment and synapses" + tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, + initialPerm=.5, connectedPerm= 0.5, + permanenceMax=1, + minThreshold=8, newSynapseCount=10, + permanenceInc=0.1, permanenceDec=0.01, + globalDecay=.0, activationThreshold=8, + doPooling=False, segUpdateValidDuration=5, + seed=SEED, verbosity=VERBOSITY, + maxAge=0, + maxSegmentsPerCell=2, maxSynapsesPerSegment=10, + checkSynapseConsistency=True) + tm.cells4.setCellSegmentOrder(True) + self.basicTest2(tm, numPatterns=15, numRepetitions=1) + + if not short: + print "\nTesting with fixed resource CLA - test max segment and synapses" + tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, + initialPerm = .5, connectedPerm= 0.5, + permanenceMax = 1, + minThreshold = 8, newSynapseCount = 10, + permanenceInc = .1, permanenceDec= .01, + globalDecay = .0, activationThreshold = 8, + doPooling = False, segUpdateValidDuration = 5, + seed=SEED, verbosity = VERBOSITY, + maxAge = 0, + maxSegmentsPerCell = 2, maxSynapsesPerSegment = 10, + checkSynapseConsistency = True) + tm.cells4.setCellSegmentOrder(1) + self.basicTest2(tm, numPatterns=30, numRepetitions=2) + + print "\nTesting with permanenceInc = 0 and Dec = 0" + tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5, + initialPerm = .5, connectedPerm= 0.5, + minThreshold = 3, newSynapseCount = 3, + permanenceInc = 0.0, permanenceDec= 0.00, + permanenceMax = 1, + globalDecay = .0, activationThreshold = 3, + doPooling = False, segUpdateValidDuration = 5, + seed=SEED, 
+                             checkSynapseConsistency = False)
+      tm.printParameters()
+      self.basicTest2(tm, numPatterns = 30, numRepetitions = 3)
+
+      print "Testing with permanenceInc = 0 and Dec = 0 and 1 cell per column"
+      tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=1,
+                             initialPerm = .5, connectedPerm= 0.5,
+                             minThreshold = 3, newSynapseCount = 3,
+                             permanenceInc = 0.0, permanenceDec= 0.0,
+                             permanenceMax = 1,
+                             globalDecay = .0, activationThreshold = 3,
+                             doPooling = False, segUpdateValidDuration = 5,
+                             seed=SEED, verbosity = VERBOSITY,
+                             checkSynapseConsistency = False)
+      self.basicTest2(tm)
+
+      print "Testing with permanenceInc = 0.1 and Dec = .0"
+      tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
+                             initialPerm = .5, connectedPerm= 0.5,
+                             minThreshold = 3, newSynapseCount = 3,
+                             permanenceInc = .1, permanenceDec= .0,
+                             permanenceMax = 1,
+                             globalDecay = .0, activationThreshold = 3,
+                             doPooling = False, segUpdateValidDuration = 5,
+                             seed=SEED, verbosity = VERBOSITY,
+                             checkSynapseConsistency = False)
+      self.basicTest2(tm)
+
+      print ("Testing with permanenceInc = 0.1, Dec = .01 and higher synapse "
+             "count")
+      tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=2,
+                             initialPerm = .5, connectedPerm= 0.5,
+                             minThreshold = 3, newSynapseCount = 5,
+                             permanenceInc = .1, permanenceDec= .01,
+                             permanenceMax = 1,
+                             globalDecay = .0, activationThreshold = 3,
+                             doPooling = False, segUpdateValidDuration = 5,
+                             seed=SEED, verbosity = VERBOSITY,
+                             checkSynapseConsistency = True)
+      self.basicTest2(tm, numPatterns=10, numRepetitions=2)
+
+      print "Testing age based global decay"
+      tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
+                             initialPerm = .4, connectedPerm= 0.5,
+                             minThreshold = 3, newSynapseCount = 3,
+                             permanenceInc = 0.1, permanenceDec= 0.1,
+                             permanenceMax = 1,
+                             globalDecay = .25, activationThreshold = 3,
+                             doPooling = False, segUpdateValidDuration = 5,
+                             pamLength = 2, maxAge = 20,
+                             seed=SEED, verbosity = VERBOSITY,
+                             checkSynapseConsistency = True)
+      tm.cells4.setCellSegmentOrder(1)
+      self.basicTest2(tm)
+
+      print "\nTesting with fixed size CLA, max segments per cell"
+      tm = BacktrackingTMCPP(numberOfCols=30, cellsPerColumn=5,
+                             initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
+                             minThreshold = 8, newSynapseCount = 10,
+                             permanenceInc = .1, permanenceDec= .01,
+                             globalDecay = .0, activationThreshold = 8,
+                             doPooling = False, segUpdateValidDuration = 5,
+                             seed=SEED, verbosity = VERBOSITY,
+                             maxAge = 0,
+                             maxSegmentsPerCell = 2, maxSynapsesPerSegment = 100,
+                             checkSynapseConsistency = True)
+      tm.cells4.setCellSegmentOrder(1)
+      self.basicTest2(tm, numPatterns=30, numRepetitions=2)
+
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/tests/unit/nupic/algorithms/tp10x2_test.py b/tests/unit/nupic/algorithms/tp10x2_test.py
deleted file mode 100755
index af48be80e8..0000000000
--- a/tests/unit/nupic/algorithms/tp10x2_test.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# ----------------------------------------------------------------------
-# Numenta Platform for Intelligent Computing (NuPIC)
-# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
-# with Numenta, Inc., for a separate license for this software code, the
-# following terms and conditions apply:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero Public License version 3 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU Affero Public License for more details.
-#
-# You should have received a copy of the GNU Affero Public License
-# along with this program. If not, see http://www.gnu.org/licenses.
-#
-# http://numenta.org/licenses/
-# ----------------------------------------------------------------------
-
-"""Tests for the C++ implementation of the temporal pooler."""
-
-import cPickle as pickle
-import unittest2 as unittest
-
-import numpy
-
-from nupic.bindings.math import Random
-from nupic.research import fdrutilities as fdrutils
-from nupic.research.TP import TP
-from nupic.research.TP10X2 import TP10X2
-
-VERBOSITY = 0 # how chatty the unit tests should be
-INFERENCE_VERBOSITY = 0 # Chattiness during inference test
-SEED = 12
-_RGEN = Random(SEED)
-
-
-
-def checkCell0(tp):
-  """Check that cell 0 has no incoming segments"""
-  for c in range(tp.numberOfCols):
-    assert tp.getNumSegmentsInCell(c, 0) == 0
-
-
-
-def setVerbosity(verbosity, tp, tpPy):
-  """Set verbosity levels of the TP's"""
-  tp.cells4.setVerbosity(verbosity)
-  tp.verbosity = verbosity
-  tpPy.verbosity = verbosity
-
-
-
-class TP10X2Test(unittest.TestCase):
-
-
-  def basicTest(self):
-    """Basic test (creation, pickling, basic run of learning and inference)"""
-    # Create TP object
-    tp = TP10X2(numberOfCols=10, cellsPerColumn=3, initialPerm=.2,
-                connectedPerm= 0.8, minThreshold=2, newSynapseCount=5,
-                permanenceInc=.1, permanenceDec= .05, permanenceMax=1,
-                globalDecay=.05, activationThreshold=4, doPooling=False,
-                segUpdateValidDuration=5, seed=SEED, verbosity=VERBOSITY)
-    tp.retrieveLearningStates = True
-
-    # Save and reload
-    tp.makeCells4Ephemeral = False
-    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
-    tp2 = pickle.load(open("test_tp10x.pkl"))
-
-    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))
-
-    # Learn
-    for i in xrange(5):
-      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
-      _RGEN.initializeUInt32Array(x, 2)
-      tp.learn(x)
-
-    # Save and reload after learning
-    tp.reset()
-    tp.makeCells4Ephemeral = False
-    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
-    tp2 = pickle.load(open("test_tp10x.pkl"))
-    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY))
-
-    ## Infer
-    patterns = numpy.zeros((4, tp.numberOfCols), dtype='uint32')
-    for i in xrange(4):
-      _RGEN.initializeUInt32Array(patterns[i], 2)
-
-    for i in xrange(10):
-      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
-      _RGEN.initializeUInt32Array(x, 2)
-      tp.infer(x)
-      if i > 0:
-        tp.checkPrediction2(patterns)
-
-
-  def basicTest2(self, tp, numPatterns=100, numRepetitions=3, activity=15,
-                 testTrimming=False, testRebuild=False):
-    """Basic test (basic run of learning and inference)"""
-    # Create PY TP object that mirrors the one sent in.
-    tpPy = TP(numberOfCols=tp.numberOfCols, cellsPerColumn=tp.cellsPerColumn,
-              initialPerm=tp.initialPerm, connectedPerm=tp.connectedPerm,
-              minThreshold=tp.minThreshold, newSynapseCount=tp.newSynapseCount,
-              permanenceInc=tp.permanenceInc, permanenceDec=tp.permanenceDec,
-              permanenceMax=tp.permanenceMax, globalDecay=tp.globalDecay,
-              activationThreshold=tp.activationThreshold,
-              doPooling=tp.doPooling,
-              segUpdateValidDuration=tp.segUpdateValidDuration,
-              pamLength=tp.pamLength, maxAge=tp.maxAge,
-              maxSeqLength=tp.maxSeqLength,
-              maxSegmentsPerCell=tp.maxSegmentsPerCell,
-              maxSynapsesPerSegment=tp.maxSynapsesPerSegment,
-              seed=tp.seed, verbosity=tp.verbosity)
-
-    # Ensure we are copying over learning states for TPDiff
-    tp.retrieveLearningStates = True
-
-    verbosity = VERBOSITY
-
-    # Learn
-
-    # Build up sequences
-    sequence = fdrutils.generateCoincMatrix(nCoinc=numPatterns,
-                                            length=tp.numberOfCols,
-                                            activity=activity)
-    for r in xrange(numRepetitions):
-      for i in xrange(sequence.nRows()):
-
-        #if i > 11:
-        #  setVerbosity(6, tp, tpPy)
-
-        if i % 10 == 0:
-          tp.reset()
-          tpPy.reset()
-
-        if verbosity >= 2:
-          print "\n\n ===================================\nPattern:",
-          print i, "Round:", r, "input:", sequence.getRow(i)
-
-        y1 = tp.learn(sequence.getRow(i))
-        y2 = tpPy.learn(sequence.getRow(i))
-
-        # Ensure everything continues to work well even if we continuously
-        # rebuild outSynapses structure
-        if testRebuild:
-          tp.cells4.rebuildOutSynapses()
-
-        if testTrimming:
-          tp.trimSegments()
-          tpPy.trimSegments()
-
-        if verbosity > 2:
-          print "\n ------ CPP states ------ ",
-          tp.printStates()
-          print "\n ------ PY states ------ ",
-          tpPy.printStates()
-          if verbosity > 6:
-            print "C++ cells: "
-            tp.printCells()
-            print "PY cells: "
-            tpPy.printCells()
-
-        if verbosity >= 3:
-          print "Num segments in PY and C++", tpPy.getNumSegments(), \
-              tp.getNumSegments()
-
-        # Check if the two TP's are identical or not. This check is slow so
-        # we do it every other iteration. Make it every iteration for debugging
-        # as needed.
-        self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity, False))
-
-        # Check that outputs are identical
-        self.assertLess(abs((y1 - y2).sum()), 3)
-
-    print "Learning completed"
-
-    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, verbosity))
-
-    # TODO: Need to check - currently failing this
-    #checkCell0(tpPy)
-
-    # Remove unconnected synapses and check TP's again
-
-    # Test rebuild out synapses
-    print "Rebuilding outSynapses"
-    tp.cells4.rebuildOutSynapses()
-    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))
-
-    print "Trimming segments"
-    tp.trimSegments()
-    tpPy.trimSegments()
-    self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY))
-
-    # Save and reload after learning
-    print "Pickling and unpickling"
-    tp.makeCells4Ephemeral = False
-    pickle.dump(tp, open("test_tp10x.pkl", "wb"))
-    tp2 = pickle.load(open("test_tp10x.pkl"))
-    self.assertTrue(fdrutils.tpDiff2(tp, tp2, VERBOSITY, checkStates=False))
-
-    # Infer
-    print "Testing inference"
-
-    # Setup for inference
-    tp.reset()
-    tpPy.reset()
-    setVerbosity(INFERENCE_VERBOSITY, tp, tpPy)
-
-    patterns = numpy.zeros((40, tp.numberOfCols), dtype='uint32')
-    for i in xrange(4):
-      _RGEN.initializeUInt32Array(patterns[i], 2)
-
-    for i, x in enumerate(patterns):
-
-      x = numpy.zeros(tp.numberOfCols, dtype='uint32')
-      _RGEN.initializeUInt32Array(x, 2)
-      y = tp.infer(x)
-      yPy = tpPy.infer(x)
-
-      self.assertTrue(fdrutils.tpDiff2(tp, tpPy, VERBOSITY, checkLearn=False))
-      if abs((y - yPy).sum()) > 0:
-        print "C++ output", y
-        print "Py output", yPy
-        assert False
-
-      if i > 0:
-        tp.checkPrediction2(patterns)
-        tpPy.checkPrediction2(patterns)
-
-    print "Inference completed"
-    print "===================================="
-
-    return tp, tpPy
-
-
-  def testTPs(self, short=True):
-    """Call basicTest2 with multiple parameter settings and ensure the C++ and
-    PY versions are identical throughout."""
-
-    if short == True:
-      print "Testing short version"
-    else:
-      print "Testing long version"
-
-    if short:
-      print "\nTesting with fixed resource CLA - test max segment and synapses"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm=.5, connectedPerm= 0.5, permanenceMax=1,
-                  minThreshold=8, newSynapseCount=10,
-                  permanenceInc=0.1, permanenceDec=0.01,
-                  globalDecay=.0, activationThreshold=8,
-                  doPooling=False, segUpdateValidDuration=5,
-                  seed=SEED, verbosity=VERBOSITY,
-                  maxAge=0,
-                  maxSegmentsPerCell=2, maxSynapsesPerSegment=10,
-                  checkSynapseConsistency=True)
-      tp.cells4.setCellSegmentOrder(True)
-      self.basicTest2(tp, numPatterns=15, numRepetitions=1)
-
-    if not short:
-      print "\nTesting with fixed resource CLA - test max segment and synapses"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
-                  minThreshold = 8, newSynapseCount = 10,
-                  permanenceInc = .1, permanenceDec= .01,
-                  globalDecay = .0, activationThreshold = 8,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  maxAge = 0,
-                  maxSegmentsPerCell = 2, maxSynapsesPerSegment = 10,
-                  checkSynapseConsistency = True)
-      tp.cells4.setCellSegmentOrder(1)
-      self.basicTest2(tp, numPatterns=30, numRepetitions=2)
-
-      print "\nTesting with permanenceInc = 0 and Dec = 0"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm = .5, connectedPerm= 0.5,
-                  minThreshold = 3, newSynapseCount = 3,
-                  permanenceInc = 0.0, permanenceDec= 0.00,
-                  permanenceMax = 1,
-                  globalDecay = .0, activationThreshold = 3,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  checkSynapseConsistency = False)
-      tp.printParameters()
-      self.basicTest2(tp, numPatterns = 30, numRepetitions = 3)
-
-      print "Testing with permanenceInc = 0 and Dec = 0 and 1 cell per column"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=1,
-                  initialPerm = .5, connectedPerm= 0.5,
-                  minThreshold = 3, newSynapseCount = 3,
-                  permanenceInc = 0.0, permanenceDec= 0.0,
-                  permanenceMax = 1,
-                  globalDecay = .0, activationThreshold = 3,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  checkSynapseConsistency = False)
-      self.basicTest2(tp)
-
-      print "Testing with permanenceInc = 0.1 and Dec = .0"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm = .5, connectedPerm= 0.5,
-                  minThreshold = 3, newSynapseCount = 3,
-                  permanenceInc = .1, permanenceDec= .0,
-                  permanenceMax = 1,
-                  globalDecay = .0, activationThreshold = 3,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  checkSynapseConsistency = False)
-      self.basicTest2(tp)
-
-      print ("Testing with permanenceInc = 0.1, Dec = .01 and higher synapse "
-             "count")
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=2,
-                  initialPerm = .5, connectedPerm= 0.5,
-                  minThreshold = 3, newSynapseCount = 5,
-                  permanenceInc = .1, permanenceDec= .01,
-                  permanenceMax = 1,
-                  globalDecay = .0, activationThreshold = 3,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  checkSynapseConsistency = True)
-      self.basicTest2(tp, numPatterns=10, numRepetitions=2)
-
-      print "Testing age based global decay"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm = .4, connectedPerm= 0.5,
-                  minThreshold = 3, newSynapseCount = 3,
-                  permanenceInc = 0.1, permanenceDec= 0.1,
-                  permanenceMax = 1,
-                  globalDecay = .25, activationThreshold = 3,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  pamLength = 2, maxAge = 20,
-                  seed=SEED, verbosity = VERBOSITY,
-                  checkSynapseConsistency = True)
-      tp.cells4.setCellSegmentOrder(1)
-      self.basicTest2(tp)
-
-      print "\nTesting with fixed size CLA, max segments per cell"
-      tp = TP10X2(numberOfCols=30, cellsPerColumn=5,
-                  initialPerm = .5, connectedPerm= 0.5, permanenceMax = 1,
-                  minThreshold = 8, newSynapseCount = 10,
-                  permanenceInc = .1, permanenceDec= .01,
-                  globalDecay = .0, activationThreshold = 8,
-                  doPooling = False, segUpdateValidDuration = 5,
-                  seed=SEED, verbosity = VERBOSITY,
-                  maxAge = 0,
-                  maxSegmentsPerCell = 2, maxSynapsesPerSegment = 100,
-                  checkSynapseConsistency = True)
-      tp.cells4.setCellSegmentOrder(1)
-      self.basicTest2(tp, numPatterns=30, numRepetitions=2)
-
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/tests/unit/nupic/encoders/logenc_test.py b/tests/unit/nupic/encoders/logenc_test.py
index 9e8a3d0828..a8168b0987 100755
--- a/tests/unit/nupic/encoders/logenc_test.py
+++ b/tests/unit/nupic/encoders/logenc_test.py
@@ -76,10 +76,10 @@ def testLogEncoder(self):
                    ([1], [1000], 0.25),
                    ([1], [1], 1.0),
                    ([1], [-200], 1.0)]
-    for tp in testTuples:
-      expected = tp[0]
-      actual = tp[1]
-      expectedResult = tp[2]
+    for tm in testTuples:
+      expected = tm[0]
+      actual = tm[1]
+      expectedResult = tm[2]
       self.assertEqual(le.closenessScores(expected, actual), expectedResult,
                        "exp: %s act: %s expR: %s" % (str(expected),
diff --git a/tests/unit/nupic/frameworks/opf/htmpredictionmodel_classifier_helper_test.py b/tests/unit/nupic/frameworks/opf/htmpredictionmodel_classifier_helper_test.py
index 9d8b23ae56..106be02878 100755
--- a/tests/unit/nupic/frameworks/opf/htmpredictionmodel_classifier_helper_test.py
+++ b/tests/unit/nupic/frameworks/opf/htmpredictionmodel_classifier_helper_test.py
@@ -635,25 +635,25 @@ def testConstructClassificationVector(self):
     }
     self.helper.htm_prediction_model.getParameter.side_effect = modelParams.get
     sp = self.helper.htm_prediction_model._getSPRegion()
-    tp = self.helper.htm_prediction_model._getTPRegion()
-    tpImp = tp.getSelf()._tfdr
+    tm = self.helper.htm_prediction_model._getTPRegion()
+    tpImp = tm.getSelf()._tfdr
     sp.getParameter.side_effect = spVals['params'].get
     sp.getOutputData.side_effect = spVals['output'].get
     self.helper._activeColumnCount = 5
-    tp.getParameter.side_effect = tpVals['params'].get
-    tp.getOutputData.side_effect = tpVals['output'].get
+    tm.getParameter.side_effect = tpVals['params'].get
+    tm.getOutputData.side_effect = tpVals['output'].get
     tpImp.getLearnActiveStateT.return_value = tpVals['output']['lrnActive']
-    # Test TP Cell vector
+    # Test TM Cell vector
     self.helper._vectorType = 'tpc'
     vector = self.helper._constructClassificationRecord()
     self.assertEqual(vector.anomalyVector,
                      tpImp.getLearnActiveStateT().nonzero()[0].tolist())
-    # Test SP and TP Column Error vector
+    # Test SP and TM Column Error vector
     self.helper._vectorType = 'sp_tpe'
     self.helper._prevPredictedColumns = numpy.array([1,0,0,0,1]).nonzero()[0]
     vector = self.helper._constructClassificationRecord()
diff --git a/tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py b/tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py
index 98adba52f3..8221b9298a 100755
--- a/tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py
+++ b/tests/unit/nupic/regions/knn_anomaly_classifier_region_test.py
@@ -608,13 +608,13 @@ def testConstructClassificationVector(self):
     self.helper._activeColumnCount = 5
-    # Test TP Cell vector
+    # Test TM Cell vector
     self.helper.classificationVectorType = 1
     vector = self.helper.constructClassificationRecord(inputs)
     self.assertEqual(vector.anomalyVector,
                     tpVals['output']['lrnActive'].nonzero()[0].tolist())
-    # Test SP and TP Column Error vector
+    # Test SP and TM Column Error vector
     self.helper.classificationVectorType = 2
     self.helper._prevPredictedColumns = numpy.array(
         [1, 0, 0, 0, 1]).nonzero()[0]
diff --git a/tests/unit/nupic/research/data/tp_input.csv b/tests/unit/nupic/research/data/tm_input.csv
similarity index 100%
rename from tests/unit/nupic/research/data/tp_input.csv
rename to tests/unit/nupic/research/data/tm_input.csv
diff --git a/tests/unit/nupic/research/tp_constant_test.py b/tests/unit/nupic/research/tm_constant_test.py
similarity index 52%
rename from tests/unit/nupic/research/tp_constant_test.py
rename to tests/unit/nupic/research/tm_constant_test.py
index 4c91cb388c..51c5dd0453 100755
--- a/tests/unit/nupic/research/tp_constant_test.py
+++ b/tests/unit/nupic/research/tm_constant_test.py
@@ -29,8 +29,8 @@
 import unittest2 as unittest
 from nupic.research import fdrutilities as fdrutils
-from nupic.research.TP import TP
-from nupic.research.TP10X2 import TP10X2
+from nupic.research.BacktrackingTM import BacktrackingTM
+from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP
 _SEED = 42
 VERBOSITY = 1
@@ -58,9 +58,9 @@ def _getSimplePatterns(numOnes, numPatterns):
   return p
-def _createTps(numCols):
-  """Create two instances of temporal poolers (TP.py and TP10X2.py) with
-  identical parameter settings."""
+def _createTms(numCols):
+  """Create two instances of temporal poolers (BacktrackingTM.py
+  and BacktrackingTMCPP.py) with identical parameter settings."""
   # Keep these fixed:
   minThreshold = 4
@@ -73,39 +73,47 @@ def _createTps(numCols):
   globalDecay = 0
   cellsPerColumn = 1
-  cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
-                 initialPerm=initialPerm, connectedPerm=connectedPerm,
-                 minThreshold=minThreshold, newSynapseCount=newSynapseCount,
-                 permanenceInc=permanenceInc, permanenceDec=permanenceDec,
-                 activationThreshold=activationThreshold,
-                 globalDecay=globalDecay, burnIn=1,
-                 seed=_SEED, verbosity=VERBOSITY,
-                 checkSynapseConsistency=True,
-                 pamLength=1000)
-
-  # Ensure we are copying over learning states for TPDiff
-  cppTp.retrieveLearningStates = True
-
-  pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
-            initialPerm=initialPerm, connectedPerm=connectedPerm,
-            minThreshold=minThreshold, newSynapseCount=newSynapseCount,
-            permanenceInc=permanenceInc, permanenceDec=permanenceDec,
-            activationThreshold=activationThreshold,
-            globalDecay=globalDecay, burnIn=1,
-            seed=_SEED, verbosity=VERBOSITY,
-            pamLength=1000)
-
-  return cppTp, pyTp
-
-
-class TPConstantTest(unittest.TestCase):
+  cppTm = BacktrackingTMCPP(numberOfCols=numCols,
+                            cellsPerColumn=cellsPerColumn,
+                            initialPerm=initialPerm,
+                            connectedPerm=connectedPerm,
+                            minThreshold=minThreshold,
+                            newSynapseCount=newSynapseCount,
+                            permanenceInc=permanenceInc,
+                            permanenceDec=permanenceDec,
+                            activationThreshold=activationThreshold,
+                            globalDecay=globalDecay, burnIn=1,
+                            seed=_SEED, verbosity=VERBOSITY,
+                            checkSynapseConsistency=True,
+                            pamLength=1000)
+
+  # Ensure we are copying over learning states for TMDiff
+  cppTm.retrieveLearningStates = True
+
+  pyTm = BacktrackingTM(numberOfCols=numCols,
+                        cellsPerColumn=cellsPerColumn,
+                        initialPerm=initialPerm,
+                        connectedPerm=connectedPerm,
+                        minThreshold=minThreshold,
+                        newSynapseCount=newSynapseCount,
+                        permanenceInc=permanenceInc,
+                        permanenceDec=permanenceDec,
+                        activationThreshold=activationThreshold,
+                        globalDecay=globalDecay, burnIn=1,
+                        seed=_SEED, verbosity=VERBOSITY,
+                        pamLength=1000)
+
+  return cppTm, pyTm
+
+
+class TMConstantTest(unittest.TestCase):
   def setUp(self):
-    self.cppTp, self.pyTp = _createTps(100)
+    self.cppTm, self.pyTm = _createTms(100)
-  def _basicTest(self, tp=None):
+  def _basicTest(self, tm=None):
     """Test creation, pickling, and basic run of learning and inference."""
     trainingSet = _getSimplePatterns(10, 10)
@@ -114,47 +122,47 @@ def _basicTest(self, tp=None):
     for _ in range(2):
       for seq in trainingSet[0:5]:
         for _ in range(10):
-          tp.learn(seq)
-      tp.reset()
+          tm.learn(seq)
+      tm.reset()
     print "Learning completed"
     # Infer
     print "Running inference"
-    tp.collectStats = True
+    tm.collectStats = True
     for seq in trainingSet[0:5]:
-      tp.reset()
-      tp.resetStats()
+      tm.reset()
+      tm.resetStats()
       for _ in range(10):
-        tp.infer(seq)
+        tm.infer(seq)
         if VERBOSITY > 1 :
           print _printOneTrainingVector(seq)
-          tp.printStates(False, False)
+          tm.printStates(False, False)
           print
           print
       if VERBOSITY > 1:
-        print tp.getStats()
+        print tm.getStats()
       # Ensure our predictions are accurate for each sequence
-      self.assertGreater(tp.getStats()['predictionScoreAvg2'], 0.8)
-      print ("tp.getStats()['predictionScoreAvg2'] = ",
-             tp.getStats()['predictionScoreAvg2'])
+      self.assertGreater(tm.getStats()['predictionScoreAvg2'], 0.8)
+      print ("tm.getStats()['predictionScoreAvg2'] = ",
+             tm.getStats()['predictionScoreAvg2'])
-    print "TPConstant basicTest ok"
+    print "TMConstant basicTest ok"
-  def testCppTpBasic(self):
-    self._basicTest(self.cppTp)
+  def testCppTmBasic(self):
+    self._basicTest(self.cppTm)
-  def testPyTpBasic(self):
-    self._basicTest(self.pyTp)
+  def testPyTmBasic(self):
+    self._basicTest(self.pyTm)
-  def testIdenticalTps(self):
-    self.assertTrue(fdrutils.tpDiff2(self.cppTp, self.pyTp))
+  def testIdenticalTms(self):
+    self.assertTrue(fdrutils.tmDiff2(self.cppTm, self.pyTm))
diff --git a/tests/unit/nupic/research/tp10x2_test.py b/tests/unit/nupic/research/tm_cpp_test.py
similarity index 80%
rename from tests/unit/nupic/research/tp10x2_test.py
rename to tests/unit/nupic/research/tm_cpp_test.py
index 3e3f69680c..bbe3e8d6df 100755
--- a/tests/unit/nupic/research/tp10x2_test.py
+++ b/tests/unit/nupic/research/tm_cpp_test.py
@@ -19,17 +19,17 @@
 # http://numenta.org/licenses/
 # ----------------------------------------------------------------------
-"""Tests for the C++ implementation of the temporal pooler."""
+"""Tests for the C++ implementation of the temporal memory."""
 import unittest2 as unittest
-from nupic.research.TP10X2 import TP10X2
+from nupic.research.BacktrackingTMCPP import BacktrackingTMCPP
-import tp_test
+import tm_test
-# Run the Python TP test against the TP10X2.
-tp_test.TP = TP10X2
-TPTest = tp_test.TPTest
+# Run the Python TM test against the BacktrackingTMCPP.
+tm_test.BacktrackingTM = BacktrackingTMCPP
+TMTest = tm_test.TMTest
diff --git a/tests/unit/nupic/research/tp_test.py b/tests/unit/nupic/research/tm_test.py
similarity index 59%
rename from tests/unit/nupic/research/tp_test.py
rename to tests/unit/nupic/research/tm_test.py
index 8ab0c2d43d..fa7ee30b90 100755
--- a/tests/unit/nupic/research/tp_test.py
+++ b/tests/unit/nupic/research/tm_test.py
@@ -19,7 +19,7 @@
 # http://numenta.org/licenses/
 # ----------------------------------------------------------------------
-"""Tests for the Python implementation of the temporal pooler."""
+"""Tests for the Python implementation of the temporal memory."""
 import csv
 import cPickle as pickle
@@ -34,7 +34,7 @@
 from pkg_resources import resource_filename
 from nupic.research import fdrutilities
-from nupic.research.TP import TP
+from nupic.research.BacktrackingTM import BacktrackingTM
 COL_SET = set(range(500))
@@ -42,8 +42,8 @@
-class TPTest(unittest.TestCase):
-  """Unit tests for the TP class."""
+class TMTest(unittest.TestCase):
+  """Unit tests for the TM class."""
   def setUp(self):
@@ -54,88 +54,92 @@ def tearDown(self):
     shutil.rmtree(self._tmpDir)
-  def testInitDefaultTP(self):
-    self.assertTrue(isinstance(TP(), TP))
+  def testInitDefaultTM(self):
+    self.assertTrue(isinstance(BacktrackingTM(), BacktrackingTM))
   def testCheckpointLearned(self):
     # Create a model and give it some inputs to learn.
-    tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
+    tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
+                         verbosity=VERBOSITY)
     sequences = [self.generateSequence() for _ in xrange(5)]
     train = list(itertools.chain.from_iterable(sequences[:3]))
     for bottomUpInput in train:
       if bottomUpInput is None:
-        tp1.reset()
+        tm1.reset()
       else:
-        tp1.compute(bottomUpInput, True, True)
+        tm1.compute(bottomUpInput, True, True)
-    # Serialize and deserialized the TP.
+    # Serialize and deserialize the TM.
     checkpointPath = os.path.join(self._tmpDir, 'a')
-    tp1.saveToFile(checkpointPath)
-    tp2 = pickle.loads(pickle.dumps(tp1))
-    tp2.loadFromFile(checkpointPath)
+    tm1.saveToFile(checkpointPath)
+    tm2 = pickle.loads(pickle.dumps(tm1))
+    tm2.loadFromFile(checkpointPath)
-    # Check that the TPs are the same.
-    self.assertTPsEqual(tp1, tp2)
+    # Check that the TMs are the same.
+    self.assertTMsEqual(tm1, tm2)
     # Feed some data into the models.
     test = list(itertools.chain.from_iterable(sequences[3:]))
     for bottomUpInput in test:
       if bottomUpInput is None:
-        tp1.reset()
-        tp2.reset()
+        tm1.reset()
+        tm2.reset()
       else:
-        result1 = tp1.compute(bottomUpInput, True, True)
-        result2 = tp2.compute(bottomUpInput, True, True)
-        self.assertTPsEqual(tp1, tp2)
+        result1 = tm1.compute(bottomUpInput, True, True)
+        result2 = tm2.compute(bottomUpInput, True, True)
+        self.assertTMsEqual(tm1, tm2)
         self.assertTrue(numpy.array_equal(result1, result2))
   def testCheckpointMiddleOfSequence(self):
     # Create a model and give it some inputs to learn.
-    tp1 = TP(numberOfCols=100, cellsPerColumn=12, verbosity=VERBOSITY)
+    tm1 = BacktrackingTM(numberOfCols=100, cellsPerColumn=12,
+                         verbosity=VERBOSITY)
     sequences = [self.generateSequence() for _ in xrange(5)]
     train = list(itertools.chain.from_iterable(sequences[:3] +
                                                [sequences[3][:5]]))
     for bottomUpInput in train:
       if bottomUpInput is None:
-        tp1.reset()
+        tm1.reset()
       else:
-        tp1.compute(bottomUpInput, True, True)
+        tm1.compute(bottomUpInput, True, True)
-    # Serialize and deserialized the TP.
+    # Serialize and deserialize the TM.
     checkpointPath = os.path.join(self._tmpDir, 'a')
-    tp1.saveToFile(checkpointPath)
-    tp2 = pickle.loads(pickle.dumps(tp1))
-    tp2.loadFromFile(checkpointPath)
+    tm1.saveToFile(checkpointPath)
+    tm2 = pickle.loads(pickle.dumps(tm1))
+    tm2.loadFromFile(checkpointPath)
-    # Check that the TPs are the same.
-    self.assertTPsEqual(tp1, tp2)
+    # Check that the TMs are the same.
+    self.assertTMsEqual(tm1, tm2)
     # Feed some data into the models.
     test = list(itertools.chain.from_iterable([sequences[3][5:]] +
                                               sequences[3:]))
     for bottomUpInput in test:
       if bottomUpInput is None:
-        tp1.reset()
-        tp2.reset()
+        tm1.reset()
+        tm2.reset()
       else:
-        result1 = tp1.compute(bottomUpInput, True, True)
-        result2 = tp2.compute(bottomUpInput, True, True)
-        self.assertTPsEqual(tp1, tp2)
+        result1 = tm1.compute(bottomUpInput, True, True)
+        result2 = tm2.compute(bottomUpInput, True, True)
+        self.assertTMsEqual(tm1, tm2)
         self.assertTrue(numpy.array_equal(result1, result2))
   def testCheckpointMiddleOfSequence2(self):
     """More complex test of checkpointing in the middle of a sequence."""
-    tp1 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
-             False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
-    tp2 = TP(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14, False, 5, 2,
-             False, 1960, 0, False, 3, 10, 5, 0, 32, 128, 32, 'normal')
-
-    with open(resource_filename(__name__, 'data/tp_input.csv'), 'r') as fin:
+    tm1 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
+                         False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
+                         128, 32, 'normal')
+    tm2 = BacktrackingTM(2048, 32, 0.21, 0.5, 11, 20, 0.1, 0.1, 1.0, 0.0, 14,
+                         False, 5, 2, False, 1960, 0, False, 3, 10, 5, 0, 32,
+                         128, 32, 'normal')
+
+    with open(resource_filename(__name__, 'data/tm_input.csv'), 'r') as fin:
       reader = csv.reader(fin)
       records = []
      for bottomUpInStr in fin:
@@ -147,56 +151,56 @@ def testCheckpointMiddleOfSequence2(self):
     for r in records[:250]:
       print i
       i += 1
-      output1 = tp1.compute(r, True, True)
-      output2 = tp2.compute(r, True, True)
+      output1 = tm1.compute(r, True, True)
+      output2 = tm2.compute(r, True, True)
       self.assertTrue(numpy.array_equal(output1, output2))
     print 'Serializing and deserializing models.'
-    savePath1 = os.path.join(self._tmpDir, 'tp1.bin')
-    tp1.saveToFile(savePath1)
-    tp3 = pickle.loads(pickle.dumps(tp1))
-    tp3.loadFromFile(savePath1)
+    savePath1 = os.path.join(self._tmpDir, 'tm1.bin')
+    tm1.saveToFile(savePath1)
+    tm3 = pickle.loads(pickle.dumps(tm1))
+    tm3.loadFromFile(savePath1)
-    savePath2 = os.path.join(self._tmpDir, 'tp2.bin')
-    tp2.saveToFile(savePath2)
-    tp4 = pickle.loads(pickle.dumps(tp2))
-    tp4.loadFromFile(savePath2)
+    savePath2 = os.path.join(self._tmpDir, 'tm2.bin')
+    tm2.saveToFile(savePath2)
+    tm4 = pickle.loads(pickle.dumps(tm2))
+    tm4.loadFromFile(savePath2)
-    self.assertTPsEqual(tp1, tp3)
-    self.assertTPsEqual(tp2, tp4)
+    self.assertTMsEqual(tm1, tm3)
+    self.assertTMsEqual(tm2, tm4)
     for r in records[250:]:
       print i
       i += 1
-      out1 = tp1.compute(r, True, True)
-      out2 = tp2.compute(r, True, True)
-      out3 = tp3.compute(r, True, True)
-      out4 = tp4.compute(r, True, True)
+      out1 = tm1.compute(r, True, True)
+      out2 = tm2.compute(r, True, True)
+      out3 = tm3.compute(r, True, True)
+      out4 = tm4.compute(r, True, True)
       self.assertTrue(numpy.array_equal(out1, out2))
       self.assertTrue(numpy.array_equal(out1, out3))
       self.assertTrue(numpy.array_equal(out1, out4))
-    self.assertTPsEqual(tp1, tp2)
-    self.assertTPsEqual(tp1, tp3)
-    self.assertTPsEqual(tp2, tp4)
+    self.assertTMsEqual(tm1, tm2)
+    self.assertTMsEqual(tm1, tm3)
+    self.assertTMsEqual(tm2, tm4)
-  def assertTPsEqual(self, tp1, tp2):
-    """Asserts that two TP instances are the same.
+  def assertTMsEqual(self, tm1, tm2):
+    """Asserts that two TM instances are the same.
     This is temporarily disabled since it does not work with the C++
-    implementation of the TP.
+    implementation of the TM.
     """
-    self.assertEqual(tp1, tp2, tp1.diff(tp2))
-    self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))
+    self.assertEqual(tm1, tm2, tm1.diff(tm2))
+    self.assertTrue(fdrutilities.tmDiff2(tm1, tm2, 1, False))
   @staticmethod
   def generateSequence(n=10, numCols=100, minOnes=21, maxOnes=25):
     """Generates a sequence of n patterns."""
-    return [None] + [TPTest.generatePattern(numCols, minOnes, maxOnes)
+    return [None] + [TMTest.generatePattern(numCols, minOnes, maxOnes)
                      for _ in xrange(n)]