NUP-2397: rename TP* to TM* #3555

Merged · 14 commits · Apr 26, 2017
Changes from 10 commits
8 changes: 4 additions & 4 deletions CHANGELOG.md
@@ -125,7 +125,7 @@
* Updated SDR classifier internals
* calculate raw anomaly score in KNNAnomalyClassifier
* removes anomaly.py dependency in network_api_demo.py
* changes how TPRegion computes prevPredictedColumns and updates clamodel
* changes how TMRegion computes prevPredictedColumns and updates clamodel
* Install pip from local copy, other simplifications
* Fixup PYTHONPATH to properly include previously-defined PYTHONPATH
* adds pseudocode to core functions
@@ -250,8 +250,8 @@
* Change temporalImp to tm_py for both networks and add comment about it being a temporary value until C++ TM is implemented
* Refactored to remove common code between network_checkpoint_test.py and temporal_memory_compatibility_test.py
* Use named constants from nupic.data.fieldmeta in aggregator module instead of naked constants.
* Fix AttributeError: 'TPShim' object has no attribute 'topDownCompute'
* Support more parameters in TPShim
* Fix AttributeError: 'TMShim' object has no attribute 'topDownCompute'
* Support more parameters in TMShim
* Serialize remaining fields in CLAModel using capnproto
* Enforce pyproj==1.9.3 in requirements.txt
* Use FastCLAClassifier read class method instead of instance method
@@ -394,7 +394,7 @@
* Merge remote-tracking branch 'upstream/master'
* Rename testconsoleprinter_output.txt so as to not be picked up by py.test as a test during discovery
* likelihood test: fix raw-value must be int
* Fix broken TPShim
* Fix broken TMShim
* Revert "Fix TP Shim"
* Anomaly serialization verify complex anomaly instance
* Likelihood pickle serialization test
8 changes: 4 additions & 4 deletions ci/travis/script-run-examples.sh
@@ -31,14 +31,14 @@ python ${NUPIC}/examples/bindings/sparse_matrix_how_to.py || exit
# examples/opf (run at least 1 from each category)
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/anomaly/spatial/2field_few_skewed/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/anomaly/temporal/saw_200/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/classification/category_TP_1/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/classification/category_TM_1/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/missing_record/simple_0/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/multistep/hotgym/ || exit
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/opfrunexperiment_test/simpleOPF/hotgym_1hr_agg/ || exit

# opf/experiments/params - skip now
python ${NUPIC}/scripts/run_opf_experiment.py ${NUPIC}/examples/opf/experiments/spatial_classification/category_1/ || exit

# examples/tp
python ${NUPIC}/examples/tp/hello_tm.py || exit
python ${NUPIC}/examples/tp/tp_test.py || exit
# examples/tm
python ${NUPIC}/examples/tm/hello_tm.py || exit
python ${NUPIC}/examples/tm/tm_test.py || exit
6 changes: 3 additions & 3 deletions docs/README.md
@@ -135,12 +135,12 @@ nupic
│   ├── SPRegion.py [TODO]
│   ├── SVMClassifierNode.py [TODO]
│   ├── Spec.py [TODO]
│   ├── TPRegion.py [TODO]
│   ├── TMRegion.py [TODO]
│   ├── TestRegion.py [TODO]
│   └─── UnimportableNode.py [TODO]
├── research
│   ├── TP.py [TODO]
│   ├── TP10X2.py [TODO]
│   ├── BacktrackingTM.py [TODO]
│   ├── BacktrackingTMCPP.py [TODO]
│   ├── TP_shim.py [TODO]
│   ├── connections.py [TODO]
│   ├── fdrutilities.py [TODO]
14 changes: 7 additions & 7 deletions docs/examples/opf/model_params.py
@@ -80,7 +80,7 @@
'globalInhibition': 1,

# Number of cell columns in the cortical region (same number for
# SP and TP)
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,

@@ -121,13 +121,13 @@
'tmEnable' : True,

'tmParams': {
# TP diagnostic output verbosity control;
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
# (see verbosity in nupic/trunk/py/nupic/research/BacktrackingTM.py and BacktrackingTMCPP.py)
'verbosity': 0,

# Number of cell columns in the cortical region (same number for
# SP and TP)
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,

@@ -152,7 +152,7 @@
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
@@ -161,7 +161,7 @@
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
@@ -196,7 +196,7 @@

'outputType': 'normal',

# "Pay Attention Mode" length. This tells the TP how many new
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
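For context, a hedged sketch of how a params file like this one is typically consumed; the `ModelFactory` import path and the "consumption" field name are assumptions for illustration, not part of this diff:

```python
# Hypothetical usage sketch (Python 2, matching the rest of this repo).
import datetime

from nupic.frameworks.opf.modelfactory import ModelFactory  # assumed entry point

from model_params import MODEL_PARAMS  # the params dict this file defines

# Build a CLA model from the params above and choose a field to predict.
model = ModelFactory.create(MODEL_PARAMS)
model.enableInference({"predictedField": "consumption"})

# Feed one record; result.inferences carries predictions and anomaly scores.
result = model.run({
    "timestamp": datetime.datetime(2017, 4, 26, 12, 0),
    "consumption": 42.0,
})
print result.inferences
```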
4 changes: 2 additions & 2 deletions docs/source/api/network/regions.rst
@@ -23,10 +23,10 @@ SPRegion
:members:
:show-inheritance:

TPRegion
TMRegion
^^^^^^^^^^^^^

.. autoclass:: nupic.regions.TPRegion.TPRegion
.. autoclass:: nupic.regions.TMRegion.TMRegion
:members:
:show-inheritance:

10 changes: 5 additions & 5 deletions docs/source/guides/anomaly-detection.md
@@ -4,17 +4,17 @@ This technical note describes how the anomaly score is implemented and incorpora

The anomaly score enables the CLA to provide a metric representing the degree to which each record is predictable. For example, if you have a temporal anomaly model that is predicting the energy consumption of a building, each record will have an anomaly score between zero and one. A zero represents a completely predicted value, whereas a one represents a completely anomalous value.

The anomaly score feature of CLA is implemented on top of the core spatial and temporal pooler, and doesn't require any spatial pooler or temporal pooler algorithm changes.
The anomaly score feature of CLA is implemented on top of the core spatial and temporal memory, and doesn't require any spatial pooler or temporal memory algorithm changes.

## TemporalAnomaly model

### Description

The user must specify the model as a TemporalAnomaly type to have the model report the anomaly score. The anomaly score uses the temporal pooler to detect novel points in sequences. This will detect both novel input patterns (because they have not been seen in any sequence) as well as old spatial patterns that occur in a novel context.
The user must specify the model as a TemporalAnomaly type to have the model report the anomaly score. The anomaly score uses the temporal memory to detect novel points in sequences. This will detect both novel input patterns (because they have not been seen in any sequence) as well as old spatial patterns that occur in a novel context.

### Computation

A TemporalAnomaly model calculates the anomaly score based on the correctness of the previous prediction. This is calculated as the percentage of active spatial pooler columns that were incorrectly predicted by the temporal pooler.
A TemporalAnomaly model calculates the anomaly score based on the correctness of the previous prediction. This is calculated as the percentage of active spatial pooler columns that were incorrectly predicted by the temporal memory.

The algorithm for the anomaly score is as follows:
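The listing itself sits in a part of the file this diff does not touch; what follows is a minimal sketch consistent with the description above (the function name and the empty-input handling are illustrative assumptions):

```python
def computeRawAnomalyScore(activeColumns, prevPredictedColumns):
    """Fraction of currently active SP columns that the TM did NOT predict
    at the previous time step: 0 = fully predicted, 1 = fully anomalous."""
    active = set(activeColumns)
    if not active:
        return 0.0  # no active columns, nothing to score
    unpredicted = active - set(prevPredictedColumns)
    return float(len(unpredicted)) / len(active)

# Example: 10 active columns, 8 of them predicted -> anomaly score of 0.2
print computeRawAnomalyScore(range(10), range(8))  # 0.2
```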

@@ -59,7 +59,7 @@ There were also some attempts at adding anomaly detection that are "non-temporal"

### Computation

Since NontemporalAnomaly models have no temporal pooler, the anomaly score is based on the state within the spatial pooler.
Since NontemporalAnomaly models have no temporal memory, the anomaly score is based on the state within the spatial pooler.

To compute the nontemporal anomaly score, we first compute the "match" score for each winning column after inhibition

@@ -77,4 +77,4 @@ The purpose of this anomaly score was to detect input records that represented n

### Results

This algorithm was run on some artificial datasets. However, the results were not very promising, and this approach was abandoned. From a theoretical perspective the temporal anomaly detection technique is a superset of this technique. If a static pattern by itself is novel, by definition the temporal pooler won't make good predictions and hence the temporal anomaly score should be high. As such there was not too much interest in pursuing this route.
This algorithm was run on some artificial datasets. However, the results were not very promising, and this approach was abandoned. From a theoretical perspective the temporal anomaly detection technique is a superset of this technique. If a static pattern by itself is novel, by definition the temporal memory won't make good predictions and hence the temporal anomaly score should be high. As such there was not too much interest in pursuing this route.
2 changes: 1 addition & 1 deletion docs/source/guides/swarming/index.rst
@@ -4,7 +4,7 @@ Swarming
Swarming is a process that automatically determines the best model for a
given dataset. By "best", we mean the model that most accurately produces
the desired output. Swarming figures out which optional components should go
into a model (encoders, spatial pooler, temporal pooler, classifier, etc.),
into a model (encoders, spatial pooler, temporal memory, classifier, etc.),
as well as the best parameter values to use for each component.

We have plans to replace the current swarming library with a more universal
2 changes: 1 addition & 1 deletion docs/source/guides/swarming/running.md
@@ -2,7 +2,7 @@

This document contains detailed instructions for configuring and running swarms. Please see the document [Swarming Algorithm](Swarming-Algorithm) for a description of the underlying swarming algorithm.

Swarming is a process that automatically determines the best model for a given dataset. By "best", we mean the model that most accurately produces the desired output. Swarming figures out which optional components should go into a model (encoders, spatial pooler, temporal pooler, classifier, etc.), as well as the best parameter values to use for each component.
Swarming is a process that automatically determines the best model for a given dataset. By "best", we mean the model that most accurately produces the desired output. Swarming figures out which optional components should go into a model (encoders, spatial pooler, temporal memory, classifier, etc.), as well as the best parameter values to use for each component.

When you run a swarm, you provide the following information:
* A dataset to optimize over (a .csv file containing the inputs and desired output).
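A hedged sketch of kicking off a swarm programmatically; `permutations_runner.runWithConfig` matches the swarming entry point of this era, but the option keys and the hotgym paths are illustrative assumptions:

```python
from nupic.swarming import permutations_runner

# Minimal swarm description: the dataset, its fields, and what to optimize.
SWARM_DESCRIPTION = {
    "includedFields": [
        {"fieldName": "timestamp", "fieldType": "datetime"},
        {"fieldName": "consumption", "fieldType": "float"},
    ],
    "streamDef": {
        "info": "hotgym",
        "version": 1,
        "streams": [{
            "source": "file://extra/hotgym/hotgym.csv",  # assumed path
            "info": "hotgym.csv",
            "columns": ["*"],
        }],
    },
    "inferenceType": "TemporalAnomaly",
    "inferenceArgs": {"predictedField": "consumption"},
    "swarmSize": "small",  # small/medium/large trades speed for quality
}

# Blocks until the swarm finishes; returns the best model params it found.
model_params = permutations_runner.runWithConfig(
    SWARM_DESCRIPTION, {"maxWorkers": 4, "overwrite": True})
```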
42 changes: 21 additions & 21 deletions examples/NuPIC Walkthrough.ipynb
@@ -649,7 +649,7 @@
"source": [
"# Temporal Memory (a.k.a. Sequence Memory, Temporal Pooler)\n",
"\n",
"From: `examples/tp/hello_tm.py`"
"From: `examples/tm/hello_tm.py`"
]
},
{
@@ -674,7 +674,7 @@
"outputs": [],
"source": [
"# Step 1: create Temporal Pooler instance with appropriate parameters\n",
"tp = TP(numberOfCols=50, cellsPerColumn=2,\n",
"tm = TP(numberOfCols=50, cellsPerColumn=2,\n",
Member: Is this constructor call still valid?

Member (Author): I think so (with BacktrackingTM). I fixed the notebook, and everything executes properly now.

" initialPerm=0.5, connectedPerm=0.5,\n",
" minThreshold=10, newSynapseCount=10,\n",
" permanenceInc=0.1, permanenceDec=0.0,\n",
Expand All @@ -692,10 +692,10 @@
},
"outputs": [],
"source": [
"# Step 2: create input vectors to feed to the temporal pooler. Each input vector\n",
"# Step 2: create input vectors to feed to the temporal memory. Each input vector\n",
"# must be numberOfCols wide. Here we create a simple sequence of 5 vectors\n",
"# representing the sequence A -> B -> C -> D -> E\n",
"x = numpy.zeros((5, tp.numberOfCols), dtype=\"uint32\")\n",
"x = numpy.zeros((5, tm.numberOfCols), dtype=\"uint32\")\n",
"x[0,0:10] = 1 # Input SDR representing \"A\", corresponding to columns 0-9\n",
"x[1,10:20] = 1 # Input SDR representing \"B\", corresponding to columns 10-19\n",
"x[2,20:30] = 1 # Input SDR representing \"C\", corresponding to columns 20-29\n",
@@ -711,7 +711,7 @@
},
"outputs": [],
"source": [
"# Step 3: send this simple sequence to the temporal pooler for learning\n",
"# Step 3: send this simple sequence to the temporal memory for learning\n",
"# We repeat the sequence 10 times\n",
"for i in range(10):\n",
"\n",
@@ -721,18 +721,18 @@
" # The compute method performs one step of learning and/or inference. Note:\n",
" # here we just perform learning but you can perform prediction/inference and\n",
" # learning in the same step if you want (online learning).\n",
" tp.compute(x[j], enableLearn = True, computeInfOutput = False)\n",
" tm.compute(x[j], enableLearn = True, computeInfOutput = False)\n",
"\n",
" # This function prints the segments associated with every cell.$$$$\n",
" # If you really want to understand the TP, uncomment this line. By following\n",
" # every step you can get an excellent understanding for exactly how the TP\n",
" # learns.\n",
" #tp.printCells()\n",
" #tm.printCells()\n",
"\n",
" # The reset command tells the TP that a sequence just ended and essentially\n",
" # zeros out all the states. It is not strictly necessary but it's a bit\n",
" # messier without resets, and the TP learns quicker with resets.\n",
" tp.reset()"
" tm.reset()"
]
},
{
@@ -762,7 +762,7 @@
"0000000000 1111111111 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal pooler. This\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[10 11 12 13 14 15 16 17 18 19] \n",
"\n",
@@ -781,7 +781,7 @@
"0000000000 0000000000 1111111111 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal pooler. This\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[20 21 22 23 24 25 26 27 28 29] \n",
"\n",
@@ -800,7 +800,7 @@
"0000000000 0000000000 0000000000 1111111111 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal pooler. This\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[30 31 32 33 34 35 36 37 38 39] \n",
"\n",
@@ -819,7 +819,7 @@
"0000000000 0000000000 0000000000 0000000000 1111111111 \n",
"\n",
"\n",
"The following columns are predicted by the temporal pooler. This\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[40 41 42 43 44 45 46 47 48 49] \n",
"\n",
@@ -838,15 +838,15 @@
"0000000000 0000000000 0000000000 0000000000 0000000000 \n",
"\n",
"\n",
"The following columns are predicted by the temporal pooler. This\n",
"The following columns are predicted by the temporal memory. This\n",
"should correspond to columns in the *next* item in the sequence.\n",
"[] \n"
]
}
],
"source": [
"# Step 4: send the same sequence of vectors and look at predictions made by\n",
"# temporal pooler\n",
"# temporal memory\n",
"\n",
"# Utility routine for printing the input vector\n",
"def formatRow(x):\n",
@@ -863,7 +863,7 @@
" print \"Raw input vector\\n\",formatRow(x[j])\n",
"\n",
" # Send each vector to the TP, with learning turned off\n",
" tp.compute(x[j], enableLearn=False, computeInfOutput=True)\n",
" tm.compute(x[j], enableLearn=False, computeInfOutput=True)\n",
"\n",
" # This method prints out the active state of each cell followed by the\n",
" # predicted state of each cell. For convenience the cells are grouped\n",
@@ -874,16 +874,16 @@
" # represent the SDR for the current input pattern and the columns where\n",
" # predicted state is 1 represent the SDR for the next expected pattern\n",
" print \"\\nAll the active and predicted cells:\"\n",
" tp.printStates(printPrevious=False, printLearnState=False)\n",
" tm.printStates(printPrevious=False, printLearnState=False)\n",
"\n",
" # tp.getPredictedState() gets the predicted cells.\n",
" # tm.getPredictedState() gets the predicted cells.\n",
" # predictedCells[c][i] represents the state of the i'th cell in the c'th\n",
" # column. To see if a column is predicted, we can simply take the OR\n",
" # across all the cells in that column. In numpy we can do this by taking\n",
" # the max along axis 1.\n",
" print \"\\n\\nThe following columns are predicted by the temporal pooler. This\"\n",
" print \"\\n\\nThe following columns are predicted by the temporal memory. This\"\n",
" print \"should correspond to columns in the *next* item in the sequence.\"\n",
" predictedCells = tp.getPredictedState()\n",
" predictedCells = tm.getPredictedState()\n",
" print formatRow(predictedCells.max(axis=1).nonzero())"
]
},
@@ -1058,7 +1058,7 @@
" 'tmParams': {\n",
" # TP diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
@@ -1641,7 +1641,7 @@
" 'tmParams': {\n",
" # TP diagnostic output verbosity control;\n",
" # 0: silent; [1..6]: increasing levels of verbosity\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)\n",
" # (see verbosity in nupic/trunk/py/nupic/research/TP.py and BacktrackingTMCPP.py)\n",
" 'verbosity': 0,\n",
"\n",
" # Number of cell columns in the cortical region (same number for\n",
2 changes: 1 addition & 1 deletion examples/network/core_encoders_demo.py
@@ -81,7 +81,7 @@ def createNetwork():
#
# Add a TPRegion, a region containing a Temporal Memory
#
network.addRegion("tm", "py.TPRegion",
network.addRegion("tm", "py.TMRegion",
json.dumps({
"columnCount": 2048,
"cellsPerColumn": 32,
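Beyond the hunk shown, a region added this way still has to be linked into the network and run; a hedged sketch of the surrounding Network API calls (the link type and parameter names follow the conventions of this era, but are assumptions here):

```python
# Hypothetical continuation of createNetwork(): wire the SP region's output
# into the renamed TM region, enable learning and inference, run one record.
network.link("sp", "tm", "UniformLink", "")
network.initialize()

tmRegion = network.regions["tm"]
tmRegion.setParameter("learningMode", 1)
tmRegion.setParameter("inferenceMode", 1)

network.run(1)
```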