From 10e5ce640597772a3c99845f60cb113ebd84b5da Mon Sep 17 00:00:00 2001 From: Noumanmufc1 Date: Sun, 18 Mar 2018 03:51:09 +0500 Subject: [PATCH 1/2] added min_consistent_det to knowledge.ipynb --- README.md | 190 ++++++++- knowledge.ipynb | 1077 +++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 1224 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 91efcde94..781bf2a01 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,195 @@ Here is a table of algorithms, the figure, name of the algorithm in the book and | 19.2 | Current-Best-Learning | `current_best_learning` | [`knowledge.py`](knowledge.py) | Done | Included | | 19.3 | Version-Space-Learning | `version_space_learning` | [`knowledge.py`](knowledge.py) | Done | Included | | 19.8 | Minimal-Consistent-Det | `minimal_consistent_det` | [`knowledge.py`](knowledge.py) | Done | | -| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | | +| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | Included | +| 21.2 | Passive-ADP-Agent | `PassiveADPAgent` | [`rl.py`][rl] | Done | Included | +| 21.4 | Passive-TD-Agent | `PassiveTDAgent` | [`rl.py`][rl] | Done | Included | +| 21.8 | Q-Learning-Agent | `QLearningAgent` | [`rl.py`][rl] | Done | Included | +| 22.1 | HITS | `HITS` | [`nlp.py`][nlp] | Done | Included | +| 23 | Chart-Parse | `Chart` | [`nlp.py`][nlp] | Done | Included | +| 23.5 | CYK-Parse | `CYK_parse` | [`nlp.py`][nlp] | Done | Included | +| 25.9 | Monte-Carlo-Localization | `monte_carlo_localization` | [`probability.py`][probability] | Done | | + + +# Index of data structures + +Here is a table of the implemented data structures, the figure, name of the implementation in the repository, and the file where they are implemented. + +| **Figure** | **Name (in repository)** | **File** | +|:-------|:--------------------------------|:--------------------------| +| 3.2 | romania_map | [`search.py`][search] | +| 4.9 | vacumm_world | [`search.py`][search] | +| 4.23 | one_dim_state_space | [`search.py`][search] | +| 6.1 | australia_map | [`search.py`][search] | +| 7.13 | wumpus_world_inference | [`logic.py`][logic] | +| 7.16 | horn_clauses_KB | [`logic.py`][logic] | +| 17.1 | sequential_decision_environment | [`mdp.py`][mdp] | +| 18.2 | waiting_decision_tree | [`learning.py`][learning] | + + +# Acknowledgements + +Many thanks for contributions over the years. I got bug reports, corrected code, and other support from Darius Bacon, Phil Ruggera, Peng Shao, Amit Patil, Ted Nienstedt, Jim Martin, Ben Catanzariti, and others. Now that the project is on GitHub, you can see the [contributors](https://github.com/aimacode/aima-python/graphs/contributors) who are doing a great job of actively improving the project. Many thanks to all contributors, especially @darius, @SnShine, @reachtarunhere, @MrDupin, and @Chipe1. + + +[agents]:../master/agents.py +[csp]:../master/csp.py +[games]:../master/games.py +[grid]:../master/grid.py +[knowledge]:../master/knowledge.py +[learning]:../master/learning.py +[logic]:../master/logic.py +[mdp]:../master/mdp.py +[nlp]:../master/nlp.py +[planning]:../master/planning.py +[probability]:../master/probability.py +[rl]:../master/rl.py +[search]:../master/search.py +[utils]:../master/utils.py +[text]:../master/text.py
+

+
+ +# `aima-python` [![Build Status](https://travis-ci.org/aimacode/aima-python.svg?branch=master)](https://travis-ci.org/aimacode/aima-python) [![Binder](http://mybinder.org/badge.svg)](http://mybinder.org/repo/aimacode/aima-python) + + +Python code for the book *[Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu).* You can use this in conjunction with a course on AI, or for study on your own. We're looking for [solid contributors](https://github.com/aimacode/aima-python/blob/master/CONTRIBUTING.md) to help. + + + +## Structure of the Project + +When complete, this project will have Python implementations for all the pseudocode algorithms in the book, as well as tests and examples of use. For each major topic, such as `nlp` (natural language processing), we provide the following files: + +- `nlp.py`: Implementations of all the pseudocode algorithms, and necessary support functions/classes/data. +- `tests/test_nlp.py`: A lightweight test suite, using `assert` statements, designed for use with [`py.test`](http://pytest.org/latest/), but also usable on their own. +- `nlp.ipynb`: A Jupyter (IPython) notebook that explains and gives examples of how to use the code. +- `nlp_apps.ipynb`: A Jupyter notebook that gives example applications of the code. + + +## Python 3.4 and up + +This code requires Python 3.4 or later, and does not run in Python 2. You can [install Python](https://www.python.org/downloads) or use a browser-based Python interpreter such as [repl.it](https://repl.it/languages/python3). +You can run the code in an IDE, or from the command line with `python -i filename.py` where the `-i` option puts you in an interactive loop where you can run Python functions. See [jupyter.org](http://jupyter.org/) for instructions on setting up your own Jupyter notebook environment, or run the notebooks online with [try.jupiter.org](https://try.jupyter.org/). + + +## Installation Guide + +To download the repository: + +`git clone https://github.com/aimacode/aima-python.git` + +You also need to fetch the datasets from the [`aima-data`](https://github.com/aimacode/aima-data) repository: + +``` +cd aima-python +git submodule init +git submodule update +``` + +Wait for the datasets to download, it may take a while. Once they are downloaded, you need to install `pytest`, so that you can run the test suite: + +`pip install pytest` + +Then to run the tests: + +`py.test` + +And you are good to go! + + +# Index of Algorithms + +Here is a table of algorithms, the figure, name of the algorithm in the book and in the repository, and the file where they are implemented in the repository. This chart was made for the third edition of the book and is being updated for the upcoming fourth edition. Empty implementations are a good place for contributors to look for an issue. The [aima-pseudocode](https://github.com/aimacode/aima-pseudocode) project describes all the algorithms from the book. An asterisk next to the file name denotes the algorithm is not fully implemented. Another great place for contributors to start is by adding tests and writing on the notebooks. You can see which algorithms have tests and notebook sections below. If the algorithm you want to work on is covered, don't worry! You can still add more tests and provide some examples of use in the notebook! 
+ +| **Figure** | **Name (in 3rd edition)** | **Name (in repository)** | **File** | **Tests** | **Notebook** +|:-------|:----------------------------------|:------------------------------|:--------------------------------|:-----|:---------| +| 2 | Random-Vacuum-Agent | `RandomVacuumAgent` | [`agents.py`][agents] | Done | Included | +| 2 | Model-Based-Vacuum-Agent | `ModelBasedVacuumAgent` | [`agents.py`][agents] | Done | Included | +| 2.1 | Environment | `Environment` | [`agents.py`][agents] | Done | Included | +| 2.1 | Agent | `Agent` | [`agents.py`][agents] | Done | Included | +| 2.3 | Table-Driven-Vacuum-Agent | `TableDrivenVacuumAgent` | [`agents.py`][agents] | Done | Included | +| 2.7 | Table-Driven-Agent | `TableDrivenAgent` | [`agents.py`][agents] | Done | Included | +| 2.8 | Reflex-Vacuum-Agent | `ReflexVacuumAgent` | [`agents.py`][agents] | Done | Included | +| 2.10 | Simple-Reflex-Agent | `SimpleReflexAgent` | [`agents.py`][agents] | Done | Included | +| 2.12 | Model-Based-Reflex-Agent | `ReflexAgentWithState` | [`agents.py`][agents] | | Included | +| 3 | Problem | `Problem` | [`search.py`][search] | Done | Included | +| 3 | Node | `Node` | [`search.py`][search] | Done | Included | +| 3 | Queue | `Queue` | [`utils.py`][utils] | Done | No Need | +| 3.1 | Simple-Problem-Solving-Agent | `SimpleProblemSolvingAgent` | [`search.py`][search] | Done | Included | +| 3.2 | Romania | `romania` | [`search.py`][search] | Done | Included | +| 3.7 | Tree-Search | `tree_search` | [`search.py`][search] | Done | | +| 3.7 | Graph-Search | `graph_search` | [`search.py`][search] | Done | | +| 3.11 | Breadth-First-Search | `breadth_first_search` | [`search.py`][search] | Done | Included | +| 3.14 | Uniform-Cost-Search | `uniform_cost_search` | [`search.py`][search] | Done | Included | +| 3.17 | Depth-Limited-Search | `depth_limited_search` | [`search.py`][search] | Done | | +| 3.18 | Iterative-Deepening-Search | `iterative_deepening_search` | [`search.py`][search] | Done | | +| 3.22 | Best-First-Search | `best_first_graph_search` | [`search.py`][search] | Done | Included | +| 3.24 | A\*-Search | `astar_search` | [`search.py`][search] | Done | Included | +| 3.26 | Recursive-Best-First-Search | `recursive_best_first_search` | [`search.py`][search] | Done | | +| 4.2 | Hill-Climbing | `hill_climbing` | [`search.py`][search] | Done | Included | +| 4.5 | Simulated-Annealing | `simulated_annealing` | [`search.py`][search] | Done | | +| 4.8 | Genetic-Algorithm | `genetic_algorithm` | [`search.py`][search] | Done | Included | +| 4.11 | And-Or-Graph-Search | `and_or_graph_search` | [`search.py`][search] | Done | | +| 4.21 | Online-DFS-Agent | `online_dfs_agent` | [`search.py`][search] | | | +| 4.24 | LRTA\*-Agent | `LRTAStarAgent` | [`search.py`][search] | Done | | +| 5.3 | Minimax-Decision | `minimax_decision` | [`games.py`][games] | Done | Included | +| 5.7 | Alpha-Beta-Search | `alphabeta_search` | [`games.py`][games] | Done | Included | +| 6 | CSP | `CSP` | [`csp.py`][csp] | Done | Included | +| 6.3 | AC-3 | `AC3` | [`csp.py`][csp] | Done | | +| 6.5 | Backtracking-Search | `backtracking_search` | [`csp.py`][csp] | Done | Included | +| 6.8 | Min-Conflicts | `min_conflicts` | [`csp.py`][csp] | Done | Included | +| 6.11 | Tree-CSP-Solver | `tree_csp_solver` | [`csp.py`][csp] | Done | Included | +| 7 | KB | `KB` | [`logic.py`][logic] | Done | Included | +| 7.1 | KB-Agent | `KB_Agent` | [`logic.py`][logic] | Done | | +| 7.7 | Propositional Logic Sentence | `Expr` | [`utils.py`][utils] | Done | Included | +| 7.10 
| TT-Entails | `tt_entails` | [`logic.py`][logic] | Done | Included | +| 7.12 | PL-Resolution | `pl_resolution` | [`logic.py`][logic] | Done | Included | +| 7.14 | Convert to CNF | `to_cnf` | [`logic.py`][logic] | Done | Included | +| 7.15 | PL-FC-Entails? | `pl_fc_resolution` | [`logic.py`][logic] | Done | Included | +| 7.17 | DPLL-Satisfiable? | `dpll_satisfiable` | [`logic.py`][logic] | Done | Included | +| 7.18 | WalkSAT | `WalkSAT` | [`logic.py`][logic] | Done | Included | +| 7.20 | Hybrid-Wumpus-Agent | `HybridWumpusAgent` | | | | +| 7.22 | SATPlan | `SAT_plan` | [`logic.py`][logic] | Done | | +| 9 | Subst | `subst` | [`logic.py`][logic] | Done | | +| 9.1 | Unify | `unify` | [`logic.py`][logic] | Done | Included | +| 9.3 | FOL-FC-Ask | `fol_fc_ask` | [`logic.py`][logic] | Done | | +| 9.6 | FOL-BC-Ask | `fol_bc_ask` | [`logic.py`][logic] | Done | | +| 9.8 | Append | | | | | +| 10.1 | Air-Cargo-problem | `air_cargo` | [`planning.py`][planning] | Done | Included | +| 10.2 | Spare-Tire-Problem | `spare_tire` | [`planning.py`][planning] | Done | Included | +| 10.3 | Three-Block-Tower | `three_block_tower` | [`planning.py`][planning] | Done | Included | +| 10.7 | Cake-Problem | `have_cake_and_eat_cake_too` | [`planning.py`][planning] | Done | | +| 10.9 | Graphplan | `GraphPlan` | [`planning.py`][planning] | | | +| 10.13 | Partial-Order-Planner | | | | | +| 11.1 | Job-Shop-Problem-With-Resources | `job_shop_problem` | [`planning.py`][planning] | Done | | +| 11.5 | Hierarchical-Search | `hierarchical_search` | [`planning.py`][planning] | | | +| 11.8 | Angelic-Search | | | | | +| 11.10 | Doubles-tennis | `double_tennis_problem` | [`planning.py`][planning] | | | +| 13 | Discrete Probability Distribution | `ProbDist` | [`probability.py`][probability] | Done | Included | +| 13.1 | DT-Agent | `DTAgent` | [`probability.py`][probability] | | | +| 14.9 | Enumeration-Ask | `enumeration_ask` | [`probability.py`][probability] | Done | Included | +| 14.11 | Elimination-Ask | `elimination_ask` | [`probability.py`][probability] | Done | Included | +| 14.13 | Prior-Sample | `prior_sample` | [`probability.py`][probability] | | Included | +| 14.14 | Rejection-Sampling | `rejection_sampling` | [`probability.py`][probability] | Done | Included | +| 14.15 | Likelihood-Weighting | `likelihood_weighting` | [`probability.py`][probability] | Done | Included | +| 14.16 | Gibbs-Ask | `gibbs_ask` | [`probability.py`][probability] | Done | Included | +| 15.4 | Forward-Backward | `forward_backward` | [`probability.py`][probability] | Done | | +| 15.6 | Fixed-Lag-Smoothing | `fixed_lag_smoothing` | [`probability.py`][probability] | Done | | +| 15.17 | Particle-Filtering | `particle_filtering` | [`probability.py`][probability] | Done | | +| 16.9 | Information-Gathering-Agent | | | | | +| 17.4 | Value-Iteration | `value_iteration` | [`mdp.py`][mdp] | Done | Included | +| 17.7 | Policy-Iteration | `policy_iteration` | [`mdp.py`][mdp] | Done | Included | +| 17.9 | POMDP-Value-Iteration | | | | | +| 18.5 | Decision-Tree-Learning | `DecisionTreeLearner` | [`learning.py`][learning] | Done | Included | +| 18.8 | Cross-Validation | `cross_validation` | [`learning.py`][learning] | | | +| 18.11 | Decision-List-Learning | `DecisionListLearner` | [`learning.py`][learning]\* | | | +| 18.24 | Back-Prop-Learning | `BackPropagationLearner` | [`learning.py`][learning] | Done | Included | +| 18.34 | AdaBoost | `AdaBoost` | [`learning.py`][learning] | Done | Included | +| 19.2 | Current-Best-Learning | `current_best_learning` | 
[`knowledge.py`](knowledge.py) | Done | Included | +| 19.3 | Version-Space-Learning | `version_space_learning` | [`knowledge.py`](knowledge.py) | Done | Included | +| 19.8 | Minimal-Consistent-Det | `minimal_consistent_det` | [`knowledge.py`](knowledge.py) | Done | | +| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | Included | | 21.2 | Passive-ADP-Agent | `PassiveADPAgent` | [`rl.py`][rl] | Done | Included | | 21.4 | Passive-TD-Agent | `PassiveTDAgent` | [`rl.py`][rl] | Done | Included | | 21.8 | Q-Learning-Agent | `QLearningAgent` | [`rl.py`][rl] | Done | Included | diff --git a/knowledge.ipynb b/knowledge.ipynb index 2ffb20362..5ab2cda01 100644 --- a/knowledge.ipynb +++ b/knowledge.ipynb @@ -13,10 +13,8 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, + "execution_count": 50, + "metadata": {}, "outputs": [], "source": [ "from knowledge import *\n", @@ -96,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 51, "metadata": {}, "outputs": [ { @@ -126,7 +124,7 @@ "" ] }, - "execution_count": 2, + "execution_count": 51, "metadata": {}, "output_type": "execute_result" } @@ -150,9 +148,192 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 52, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def current_best_learning(examples, h, examples_so_far=None):\n",
+       "    """ [Figure 19.2]\n",
+       "    The hypothesis is a list of dictionaries, with each dictionary representing\n",
+       "    a disjunction."""\n",
+       "    if not examples:\n",
+       "        return h\n",
+       "\n",
+       "    examples_so_far = examples_so_far or []\n",
+       "    e = examples[0]\n",
+       "    if is_consistent(e, h):\n",
+       "        return current_best_learning(examples[1:], h, examples_so_far + [e])\n",
+       "    elif false_positive(e, h):\n",
+       "        for h2 in specializations(examples_so_far + [e], h):\n",
+       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
+       "            if h3 != 'FAIL':\n",
+       "                return h3\n",
+       "    elif false_negative(e, h):\n",
+       "        for h2 in generalizations(examples_so_far + [e], h):\n",
+       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
+       "            if h3 != 'FAIL':\n",
+       "                return h3\n",
+       "\n",
+       "    return 'FAIL'\n",
+       "\n",
+       "\n",
+       "def specializations(examples_so_far, h):\n",
+       "    """Specialize the hypothesis by adding AND operations to the disjunctions"""\n",
+       "    hypotheses = []\n",
+       "\n",
+       "    for i, disj in enumerate(h):\n",
+       "        for e in examples_so_far:\n",
+       "            for k, v in e.items():\n",
+       "                if k in disj or k == 'GOAL':\n",
+       "                    continue\n",
+       "\n",
+       "                h2 = h[i].copy()\n",
+       "                h2[k] = '!' + v\n",
+       "                h3 = h.copy()\n",
+       "                h3[i] = h2\n",
+       "                if check_all_consistency(examples_so_far, h3):\n",
+       "                    hypotheses.append(h3)\n",
+       "\n",
+       "    shuffle(hypotheses)\n",
+       "    return hypotheses\n",
+       "\n",
+       "\n",
+       "def generalizations(examples_so_far, h):\n",
+       "    """Generalize the hypothesis. First delete operations\n",
+       "    (including disjunctions) from the hypothesis. Then, add OR operations."""\n",
+       "    hypotheses = []\n",
+       "\n",
+       "    # Delete disjunctions\n",
+       "    disj_powerset = powerset(range(len(h)))\n",
+       "    for disjs in disj_powerset:\n",
+       "        h2 = h.copy()\n",
+       "        for d in reversed(list(disjs)):\n",
+       "            del h2[d]\n",
+       "\n",
+       "        if check_all_consistency(examples_so_far, h2):\n",
+       "            hypotheses += h2\n",
+       "\n",
+       "    # Delete AND operations in disjunctions\n",
+       "    for i, disj in enumerate(h):\n",
+       "        a_powerset = powerset(disj.keys())\n",
+       "        for attrs in a_powerset:\n",
+       "            h2 = h[i].copy()\n",
+       "            for a in attrs:\n",
+       "                del h2[a]\n",
+       "\n",
+       "            if check_all_consistency(examples_so_far, [h2]):\n",
+       "                h3 = h.copy()\n",
+       "                h3[i] = h2.copy()\n",
+       "                hypotheses += h3\n",
+       "\n",
+       "    # Add OR operations\n",
+       "    if hypotheses == [] or hypotheses == [{}]:\n",
+       "        hypotheses = add_or(examples_so_far, h)\n",
+       "    else:\n",
+       "        hypotheses.extend(add_or(examples_so_far, h))\n",
+       "\n",
+       "    shuffle(hypotheses)\n",
+       "    return hypotheses\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "psource(current_best_learning, specializations, generalizations)" ] @@ -195,10 +376,8 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, + "execution_count": 53, + "metadata": {}, "outputs": [], "source": [ "animals_umbrellas = [\n", @@ -221,7 +400,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 54, "metadata": {}, "outputs": [ { @@ -254,7 +433,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 55, "metadata": {}, "outputs": [ { @@ -287,14 +466,14 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[{'Species': 'Cat', 'Rain': '!No'}, {'Coat': 'Yes', 'Rain': 'Yes'}, {'Coat': 'Yes'}]\n" + "[{'Species': 'Cat', 'Rain': '!No'}, {'Rain': 'Yes', 'Coat': '!No'}, {'Rain': 'No', 'Coat': 'Yes'}]\n" ] } ], @@ -340,10 +519,8 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": true - }, + "execution_count": 28, + "metadata": {}, "outputs": [], "source": [ "def r_example(Alt, Bar, Fri, Hun, Pat, Price, Rain, Res, Type, Est, GOAL):\n", @@ -363,10 +540,8 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": true - }, + "execution_count": 29, + "metadata": {}, "outputs": [], "source": [ "restaurant = [\n", @@ -394,7 +569,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -432,14 +607,14 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[{'Res': '!No', 'Fri': '!Yes', 'Alt': 'Yes'}, {'Bar': 'Yes', 'Fri': 'No', 'Rain': 'No', 'Hun': 'No'}, {'Bar': 'No', 'Price': '$', 'Fri': 'Yes'}, {'Res': 'Yes', 'Price': '$$', 'Rain': 'Yes', 'Alt': 'No', 'Est': '0-10', 'Fri': 'No', 'Hun': 'Yes', 'Bar': 'Yes'}, {'Fri': 'No', 'Pat': 'Some', 'Price': '$$', 'Rain': 'Yes', 'Hun': 'Yes'}, {'Est': '30-60', 'Res': 'No', 'Price': '$', 'Fri': 'Yes', 'Hun': 'Yes'}]\n" + "[{'Alt': 'Yes', 'Type': '!Thai', 'Hun': '!No', 'Pat': '!Full'}, {'Alt': 'No', 'Bar': 'Yes', 'Hun': 'No', 'Price': '$', 'Rain': 'No', 'Res': 'No'}, {'Pat': 'Full', 'Price': '$', 'Rain': 'Yes', 'Type': '!Burger'}, {'Price': '$$', 'Type': 'Italian'}, {'Bar': 'No', 'Hun': 'Yes', 'Pat': 'Some', 'Price': '$$', 'Rain': 'Yes', 'Res': 'Yes', 'Type': 'Thai', 'Est': '0-10'}, {'Bar': 'Yes', 'Fri': 'Yes', 'Hun': 'Yes', 'Pat': 'Full', 'Rain': 'No', 'Res': 'No', 'Type': 'Burger'}]\n" ] } ], @@ -476,7 +651,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -502,7 +677,7 @@ "" ] }, - "execution_count": 3, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } @@ -528,27 +703,413 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def version_space_learning(examples):\n",
+       "    """ [Figure 19.3]\n",
+       "    The version space is a list of hypotheses, which in turn are a list\n",
+       "    of dictionaries/disjunctions."""\n",
+       "    V = all_hypotheses(examples)\n",
+       "    for e in examples:\n",
+       "        if V:\n",
+       "            V = version_space_update(V, e)\n",
+       "\n",
+       "    return V\n",
+       "\n",
+       "\n",
+       "def version_space_update(V, e):\n",
+       "    return [h for h in V if is_consistent(e, h)]\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "psource(version_space_learning, version_space_update)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def all_hypotheses(examples):\n",
+       "    """Build a list of all the possible hypotheses"""\n",
+       "    values = values_table(examples)\n",
+       "    h_powerset = powerset(values.keys())\n",
+       "    hypotheses = []\n",
+       "    for s in h_powerset:\n",
+       "        hypotheses.extend(build_attr_combinations(s, values))\n",
+       "\n",
+       "    hypotheses.extend(build_h_combinations(hypotheses))\n",
+       "\n",
+       "    return hypotheses\n",
+       "\n",
+       "\n",
+       "def values_table(examples):\n",
+       "    """Build a table with all the possible values for each attribute.\n",
+       "    Returns a dictionary with keys the attribute names and values a list\n",
+       "    with the possible values for the corresponding attribute."""\n",
+       "    values = defaultdict(lambda: [])\n",
+       "    for e in examples:\n",
+       "        for k, v in e.items():\n",
+       "            if k == 'GOAL':\n",
+       "                continue\n",
+       "\n",
+       "            mod = '!'\n",
+       "            if e['GOAL']:\n",
+       "                mod = ''\n",
+       "\n",
+       "            if mod + v not in values[k]:\n",
+       "                values[k].append(mod + v)\n",
+       "\n",
+       "    values = dict(values)\n",
+       "    return values\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "psource(all_hypotheses, values_table)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def build_attr_combinations(s, values):\n",
+       "    """Given a set of attributes, builds all the combinations of values.\n",
+       "    If the set holds more than one attribute, recursively builds the\n",
+       "    combinations."""\n",
+       "    if len(s) == 1:\n",
+       "        # s holds just one attribute, return its list of values\n",
+       "        k = values[s[0]]\n",
+       "        h = [[{s[0]: v}] for v in values[s[0]]]\n",
+       "        return h\n",
+       "\n",
+       "    h = []\n",
+       "    for i, a in enumerate(s):\n",
+       "        rest = build_attr_combinations(s[i+1:], values)\n",
+       "        for v in values[a]:\n",
+       "            o = {a: v}\n",
+       "            for r in rest:\n",
+       "                t = o.copy()\n",
+       "                for d in r:\n",
+       "                    t.update(d)\n",
+       "                h.append([t])\n",
+       "\n",
+       "    return h\n",
+       "\n",
+       "\n",
+       "def build_h_combinations(hypotheses):\n",
+       "    """Given a set of hypotheses, builds and returns all the combinations of the\n",
+       "    hypotheses."""\n",
+       "    h = []\n",
+       "    h_powerset = powerset(range(len(hypotheses)))\n",
+       "\n",
+       "    for s in h_powerset:\n",
+       "        t = []\n",
+       "        for i in s:\n",
+       "            t.extend(hypotheses[i])\n",
+       "        h.append(t)\n",
+       "\n",
+       "    return h\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "psource(build_attr_combinations, build_h_combinations)" ] @@ -564,10 +1125,8 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": true - }, + "execution_count": 36, + "metadata": {}, "outputs": [], "source": [ "party = [\n", @@ -586,7 +1145,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 37, "metadata": {}, "outputs": [ { @@ -620,7 +1179,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 38, "metadata": {}, "outputs": [ { @@ -651,6 +1210,440 @@ "\n", "Our initial prediction is indeed in the set of hypotheses. Also, the two other random hypotheses we got are consistent with the examples (since they both include the \"Pizza is available\" disjunction)." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Minimal Consistent Determination" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This algorithm is based on a straightforward attempt to find the simplest determination consistent with the observations. A determinaton P > Q says that if any examples match on P, then they must also match on Q. A determination is therefore consistent with a set of examples if every pair that matches on the predicates on the left-hand side also matches on the goal predicate." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pseudocode" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Lets look at the pseudocode for this algorithm" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "### AIMA3e\n", + "__function__ Minimal-Consistent-Det(_E_, _A_) __returns__ a set of attributes \n", + " __inputs__: _E_, a set of examples \n", + "     _A_, a set of attributes, of size _n_ \n", + "\n", + " __for__ _i_ = 0 __to__ _n_ __do__ \n", + "   __for each__ subset _Ai_ of _A_ of size _i_ __do__ \n", + "     __if__ Consistent-Det?(_Ai_, _E_) __then return__ _Ai_ \n", + "\n", + "---\n", + "__function__ Consistent-Det?(_A_, _E_) __returns__ a truth value \n", + " __inputs__: _A_, a set of attributes \n", + "     _E_, a set of examples \n", + " __local variables__: _H_, a hash table \n", + "\n", + " __for each__ example _e_ __in__ _E_ __do__ \n", + "   __if__ some example in _H_ has the same values as _e_ for the attributes _A_ \n", + "    but a different classification __then return__ _false_ \n", + "   store the class of _e_ in_H_, indexed by the values for attributes _A_ of the example _e_ \n", + " __return__ _true_ \n", + "\n", + "---\n", + "__Figure ??__ An algorithm for finding a minimal consistent determination." + ], + "text/plain": [ + "" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pseudocode('Minimal-Consistent-Det')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can read the code for the above algorithm by running the cells below:" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def minimal_consistent_det(E, A):\n",
+       "    """Return a minimal set of attributes which give consistent determination"""\n",
+       "    n = len(A)\n",
+       "\n",
+       "    for i in range(n + 1):\n",
+       "        for A_i in combinations(A, i):\n",
+       "            if consistent_det(A_i, E):\n",
+       "                return set(A_i)\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "psource(minimal_consistent_det)" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def consistent_det(A, E):\n",
+       "    """Check if the attributes(A) is consistent with the examples(E)"""\n",
+       "    H = {}\n",
+       "\n",
+       "    for e in E:\n",
+       "        attr_values = tuple(e[attr] for attr in A)\n",
+       "        if attr_values in H and H[attr_values] != e['GOAL']:\n",
+       "            return False\n",
+       "        H[attr_values] = e['GOAL']\n",
+       "\n",
+       "    return True\n",
+       "
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "psource(consistent_det)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We already know that no-pizza-no-party but we will still check it through the `minimal_consistent_det` algorithm." + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'Pizza'}\n" + ] + } + ], + "source": [ + "print(minimal_consistent_det(party, {'Pizza', 'Soda'}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also check it on some other example. Let's consider the following example :" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "conductance = [\n", + " {'Sample': 'S1', 'Mass': 12, 'Temp': 26, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.59},\n", + " {'Sample': 'S1', 'Mass': 12, 'Temp': 100, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.57},\n", + " {'Sample': 'S2', 'Mass': 24, 'Temp': 26, 'Material': 'Cu', 'Size': 6, 'GOAL': 0.59},\n", + " {'Sample': 'S3', 'Mass': 12, 'Temp': 26, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.05},\n", + " {'Sample': 'S3', 'Mass': 12, 'Temp': 100, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.04},\n", + " {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04},\n", + " {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04},\n", + " {'Sample': 'S5', 'Mass': 24, 'Temp': 100, 'Material': 'Pb', 'Size': 4, 'GOAL': 0.04},\n", + " {'Sample': 'S6', 'Mass': 36, 'Temp': 26, 'Material': 'Pb', 'Size': 6, 'GOAL': 0.05},\n", + "]\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we check the `minimal_consistent_det` algorithm on the above example:" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'Temp', 'Material'}\n" + ] + } + ], + "source": [ + "print(minimal_consistent_det(conductance, {'Mass', 'Temp', 'Material', 'Size'}))" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'Temp', 'Size', 'Mass'}\n" + ] + } + ], + "source": [ + "print(minimal_consistent_det(conductance, {'Mass', 'Temp', 'Size'}))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -669,7 +1662,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.3" + "version": "3.6.4" } }, "nbformat": 4, From 9c6813fb3f214c7ec772df1e34ba2ea6a1565cb1 Mon Sep 17 00:00:00 2001 From: Noumanmufc1 Date: Sun, 18 Mar 2018 09:17:44 +0500 Subject: [PATCH 2/2] some minor changes --- README.md | 192 +----------------------------------------------- knowledge.ipynb | 14 ---- 2 files changed, 2 insertions(+), 204 deletions(-) diff --git a/README.md b/README.md index 781bf2a01..64b7c1612 100644 --- a/README.md +++ b/README.md @@ -140,196 +140,8 @@ Here is a table of algorithms, the figure, name of the algorithm in the book and | 18.34 | AdaBoost | `AdaBoost` | 
[`learning.py`][learning] | Done | Included | | 19.2 | Current-Best-Learning | `current_best_learning` | [`knowledge.py`](knowledge.py) | Done | Included | | 19.3 | Version-Space-Learning | `version_space_learning` | [`knowledge.py`](knowledge.py) | Done | Included | -| 19.8 | Minimal-Consistent-Det | `minimal_consistent_det` | [`knowledge.py`](knowledge.py) | Done | | -| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | Included | -| 21.2 | Passive-ADP-Agent | `PassiveADPAgent` | [`rl.py`][rl] | Done | Included | -| 21.4 | Passive-TD-Agent | `PassiveTDAgent` | [`rl.py`][rl] | Done | Included | -| 21.8 | Q-Learning-Agent | `QLearningAgent` | [`rl.py`][rl] | Done | Included | -| 22.1 | HITS | `HITS` | [`nlp.py`][nlp] | Done | Included | -| 23 | Chart-Parse | `Chart` | [`nlp.py`][nlp] | Done | Included | -| 23.5 | CYK-Parse | `CYK_parse` | [`nlp.py`][nlp] | Done | Included | -| 25.9 | Monte-Carlo-Localization | `monte_carlo_localization` | [`probability.py`][probability] | Done | | - - -# Index of data structures - -Here is a table of the implemented data structures, the figure, name of the implementation in the repository, and the file where they are implemented. - -| **Figure** | **Name (in repository)** | **File** | -|:-------|:--------------------------------|:--------------------------| -| 3.2 | romania_map | [`search.py`][search] | -| 4.9 | vacumm_world | [`search.py`][search] | -| 4.23 | one_dim_state_space | [`search.py`][search] | -| 6.1 | australia_map | [`search.py`][search] | -| 7.13 | wumpus_world_inference | [`logic.py`][logic] | -| 7.16 | horn_clauses_KB | [`logic.py`][logic] | -| 17.1 | sequential_decision_environment | [`mdp.py`][mdp] | -| 18.2 | waiting_decision_tree | [`learning.py`][learning] | - - -# Acknowledgements - -Many thanks for contributions over the years. I got bug reports, corrected code, and other support from Darius Bacon, Phil Ruggera, Peng Shao, Amit Patil, Ted Nienstedt, Jim Martin, Ben Catanzariti, and others. Now that the project is on GitHub, you can see the [contributors](https://github.com/aimacode/aima-python/graphs/contributors) who are doing a great job of actively improving the project. Many thanks to all contributors, especially @darius, @SnShine, @reachtarunhere, @MrDupin, and @Chipe1. - - -[agents]:../master/agents.py -[csp]:../master/csp.py -[games]:../master/games.py -[grid]:../master/grid.py -[knowledge]:../master/knowledge.py -[learning]:../master/learning.py -[logic]:../master/logic.py -[mdp]:../master/mdp.py -[nlp]:../master/nlp.py -[planning]:../master/planning.py -[probability]:../master/probability.py -[rl]:../master/rl.py -[search]:../master/search.py -[utils]:../master/utils.py -[text]:../master/text.py
-

-
- -# `aima-python` [![Build Status](https://travis-ci.org/aimacode/aima-python.svg?branch=master)](https://travis-ci.org/aimacode/aima-python) [![Binder](http://mybinder.org/badge.svg)](http://mybinder.org/repo/aimacode/aima-python) - - -Python code for the book *[Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu).* You can use this in conjunction with a course on AI, or for study on your own. We're looking for [solid contributors](https://github.com/aimacode/aima-python/blob/master/CONTRIBUTING.md) to help. - - - -## Structure of the Project - -When complete, this project will have Python implementations for all the pseudocode algorithms in the book, as well as tests and examples of use. For each major topic, such as `nlp` (natural language processing), we provide the following files: - -- `nlp.py`: Implementations of all the pseudocode algorithms, and necessary support functions/classes/data. -- `tests/test_nlp.py`: A lightweight test suite, using `assert` statements, designed for use with [`py.test`](http://pytest.org/latest/), but also usable on their own. -- `nlp.ipynb`: A Jupyter (IPython) notebook that explains and gives examples of how to use the code. -- `nlp_apps.ipynb`: A Jupyter notebook that gives example applications of the code. - - -## Python 3.4 and up - -This code requires Python 3.4 or later, and does not run in Python 2. You can [install Python](https://www.python.org/downloads) or use a browser-based Python interpreter such as [repl.it](https://repl.it/languages/python3). -You can run the code in an IDE, or from the command line with `python -i filename.py` where the `-i` option puts you in an interactive loop where you can run Python functions. See [jupyter.org](http://jupyter.org/) for instructions on setting up your own Jupyter notebook environment, or run the notebooks online with [try.jupiter.org](https://try.jupyter.org/). - - -## Installation Guide - -To download the repository: - -`git clone https://github.com/aimacode/aima-python.git` - -You also need to fetch the datasets from the [`aima-data`](https://github.com/aimacode/aima-data) repository: - -``` -cd aima-python -git submodule init -git submodule update -``` - -Wait for the datasets to download, it may take a while. Once they are downloaded, you need to install `pytest`, so that you can run the test suite: - -`pip install pytest` - -Then to run the tests: - -`py.test` - -And you are good to go! - - -# Index of Algorithms - -Here is a table of algorithms, the figure, name of the algorithm in the book and in the repository, and the file where they are implemented in the repository. This chart was made for the third edition of the book and is being updated for the upcoming fourth edition. Empty implementations are a good place for contributors to look for an issue. The [aima-pseudocode](https://github.com/aimacode/aima-pseudocode) project describes all the algorithms from the book. An asterisk next to the file name denotes the algorithm is not fully implemented. Another great place for contributors to start is by adding tests and writing on the notebooks. You can see which algorithms have tests and notebook sections below. If the algorithm you want to work on is covered, don't worry! You can still add more tests and provide some examples of use in the notebook! 
- -| **Figure** | **Name (in 3rd edition)** | **Name (in repository)** | **File** | **Tests** | **Notebook** -|:-------|:----------------------------------|:------------------------------|:--------------------------------|:-----|:---------| -| 2 | Random-Vacuum-Agent | `RandomVacuumAgent` | [`agents.py`][agents] | Done | Included | -| 2 | Model-Based-Vacuum-Agent | `ModelBasedVacuumAgent` | [`agents.py`][agents] | Done | Included | -| 2.1 | Environment | `Environment` | [`agents.py`][agents] | Done | Included | -| 2.1 | Agent | `Agent` | [`agents.py`][agents] | Done | Included | -| 2.3 | Table-Driven-Vacuum-Agent | `TableDrivenVacuumAgent` | [`agents.py`][agents] | Done | Included | -| 2.7 | Table-Driven-Agent | `TableDrivenAgent` | [`agents.py`][agents] | Done | Included | -| 2.8 | Reflex-Vacuum-Agent | `ReflexVacuumAgent` | [`agents.py`][agents] | Done | Included | -| 2.10 | Simple-Reflex-Agent | `SimpleReflexAgent` | [`agents.py`][agents] | Done | Included | -| 2.12 | Model-Based-Reflex-Agent | `ReflexAgentWithState` | [`agents.py`][agents] | | Included | -| 3 | Problem | `Problem` | [`search.py`][search] | Done | Included | -| 3 | Node | `Node` | [`search.py`][search] | Done | Included | -| 3 | Queue | `Queue` | [`utils.py`][utils] | Done | No Need | -| 3.1 | Simple-Problem-Solving-Agent | `SimpleProblemSolvingAgent` | [`search.py`][search] | Done | Included | -| 3.2 | Romania | `romania` | [`search.py`][search] | Done | Included | -| 3.7 | Tree-Search | `tree_search` | [`search.py`][search] | Done | | -| 3.7 | Graph-Search | `graph_search` | [`search.py`][search] | Done | | -| 3.11 | Breadth-First-Search | `breadth_first_search` | [`search.py`][search] | Done | Included | -| 3.14 | Uniform-Cost-Search | `uniform_cost_search` | [`search.py`][search] | Done | Included | -| 3.17 | Depth-Limited-Search | `depth_limited_search` | [`search.py`][search] | Done | | -| 3.18 | Iterative-Deepening-Search | `iterative_deepening_search` | [`search.py`][search] | Done | | -| 3.22 | Best-First-Search | `best_first_graph_search` | [`search.py`][search] | Done | Included | -| 3.24 | A\*-Search | `astar_search` | [`search.py`][search] | Done | Included | -| 3.26 | Recursive-Best-First-Search | `recursive_best_first_search` | [`search.py`][search] | Done | | -| 4.2 | Hill-Climbing | `hill_climbing` | [`search.py`][search] | Done | Included | -| 4.5 | Simulated-Annealing | `simulated_annealing` | [`search.py`][search] | Done | | -| 4.8 | Genetic-Algorithm | `genetic_algorithm` | [`search.py`][search] | Done | Included | -| 4.11 | And-Or-Graph-Search | `and_or_graph_search` | [`search.py`][search] | Done | | -| 4.21 | Online-DFS-Agent | `online_dfs_agent` | [`search.py`][search] | | | -| 4.24 | LRTA\*-Agent | `LRTAStarAgent` | [`search.py`][search] | Done | | -| 5.3 | Minimax-Decision | `minimax_decision` | [`games.py`][games] | Done | Included | -| 5.7 | Alpha-Beta-Search | `alphabeta_search` | [`games.py`][games] | Done | Included | -| 6 | CSP | `CSP` | [`csp.py`][csp] | Done | Included | -| 6.3 | AC-3 | `AC3` | [`csp.py`][csp] | Done | | -| 6.5 | Backtracking-Search | `backtracking_search` | [`csp.py`][csp] | Done | Included | -| 6.8 | Min-Conflicts | `min_conflicts` | [`csp.py`][csp] | Done | Included | -| 6.11 | Tree-CSP-Solver | `tree_csp_solver` | [`csp.py`][csp] | Done | Included | -| 7 | KB | `KB` | [`logic.py`][logic] | Done | Included | -| 7.1 | KB-Agent | `KB_Agent` | [`logic.py`][logic] | Done | | -| 7.7 | Propositional Logic Sentence | `Expr` | [`utils.py`][utils] | Done | Included | -| 7.10 
| TT-Entails | `tt_entails` | [`logic.py`][logic] | Done | Included | -| 7.12 | PL-Resolution | `pl_resolution` | [`logic.py`][logic] | Done | Included | -| 7.14 | Convert to CNF | `to_cnf` | [`logic.py`][logic] | Done | Included | -| 7.15 | PL-FC-Entails? | `pl_fc_resolution` | [`logic.py`][logic] | Done | Included | -| 7.17 | DPLL-Satisfiable? | `dpll_satisfiable` | [`logic.py`][logic] | Done | Included | -| 7.18 | WalkSAT | `WalkSAT` | [`logic.py`][logic] | Done | Included | -| 7.20 | Hybrid-Wumpus-Agent | `HybridWumpusAgent` | | | | -| 7.22 | SATPlan | `SAT_plan` | [`logic.py`][logic] | Done | | -| 9 | Subst | `subst` | [`logic.py`][logic] | Done | | -| 9.1 | Unify | `unify` | [`logic.py`][logic] | Done | Included | -| 9.3 | FOL-FC-Ask | `fol_fc_ask` | [`logic.py`][logic] | Done | | -| 9.6 | FOL-BC-Ask | `fol_bc_ask` | [`logic.py`][logic] | Done | | -| 9.8 | Append | | | | | -| 10.1 | Air-Cargo-problem | `air_cargo` | [`planning.py`][planning] | Done | Included | -| 10.2 | Spare-Tire-Problem | `spare_tire` | [`planning.py`][planning] | Done | Included | -| 10.3 | Three-Block-Tower | `three_block_tower` | [`planning.py`][planning] | Done | Included | -| 10.7 | Cake-Problem | `have_cake_and_eat_cake_too` | [`planning.py`][planning] | Done | | -| 10.9 | Graphplan | `GraphPlan` | [`planning.py`][planning] | | | -| 10.13 | Partial-Order-Planner | | | | | -| 11.1 | Job-Shop-Problem-With-Resources | `job_shop_problem` | [`planning.py`][planning] | Done | | -| 11.5 | Hierarchical-Search | `hierarchical_search` | [`planning.py`][planning] | | | -| 11.8 | Angelic-Search | | | | | -| 11.10 | Doubles-tennis | `double_tennis_problem` | [`planning.py`][planning] | | | -| 13 | Discrete Probability Distribution | `ProbDist` | [`probability.py`][probability] | Done | Included | -| 13.1 | DT-Agent | `DTAgent` | [`probability.py`][probability] | | | -| 14.9 | Enumeration-Ask | `enumeration_ask` | [`probability.py`][probability] | Done | Included | -| 14.11 | Elimination-Ask | `elimination_ask` | [`probability.py`][probability] | Done | Included | -| 14.13 | Prior-Sample | `prior_sample` | [`probability.py`][probability] | | Included | -| 14.14 | Rejection-Sampling | `rejection_sampling` | [`probability.py`][probability] | Done | Included | -| 14.15 | Likelihood-Weighting | `likelihood_weighting` | [`probability.py`][probability] | Done | Included | -| 14.16 | Gibbs-Ask | `gibbs_ask` | [`probability.py`][probability] | Done | Included | -| 15.4 | Forward-Backward | `forward_backward` | [`probability.py`][probability] | Done | | -| 15.6 | Fixed-Lag-Smoothing | `fixed_lag_smoothing` | [`probability.py`][probability] | Done | | -| 15.17 | Particle-Filtering | `particle_filtering` | [`probability.py`][probability] | Done | | -| 16.9 | Information-Gathering-Agent | | | | | -| 17.4 | Value-Iteration | `value_iteration` | [`mdp.py`][mdp] | Done | Included | -| 17.7 | Policy-Iteration | `policy_iteration` | [`mdp.py`][mdp] | Done | Included | -| 17.9 | POMDP-Value-Iteration | | | | | -| 18.5 | Decision-Tree-Learning | `DecisionTreeLearner` | [`learning.py`][learning] | Done | Included | -| 18.8 | Cross-Validation | `cross_validation` | [`learning.py`][learning] | | | -| 18.11 | Decision-List-Learning | `DecisionListLearner` | [`learning.py`][learning]\* | | | -| 18.24 | Back-Prop-Learning | `BackPropagationLearner` | [`learning.py`][learning] | Done | Included | -| 18.34 | AdaBoost | `AdaBoost` | [`learning.py`][learning] | Done | Included | -| 19.2 | Current-Best-Learning | `current_best_learning` | 
[`knowledge.py`](knowledge.py) | Done | Included | -| 19.3 | Version-Space-Learning | `version_space_learning` | [`knowledge.py`](knowledge.py) | Done | Included | -| 19.8 | Minimal-Consistent-Det | `minimal_consistent_det` | [`knowledge.py`](knowledge.py) | Done | | -| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | Included | +| 19.8 | Minimal-Consistent-Det | `minimal_consistent_det` | [`knowledge.py`](knowledge.py) | Done | Included | +| 19.12 | FOIL | `FOIL_container` | [`knowledge.py`](knowledge.py) | Done | | | 21.2 | Passive-ADP-Agent | `PassiveADPAgent` | [`rl.py`][rl] | Done | Included | | 21.4 | Passive-TD-Agent | `PassiveTDAgent` | [`rl.py`][rl] | Done | Included | | 21.8 | Q-Learning-Agent | `QLearningAgent` | [`rl.py`][rl] | Done | Included | diff --git a/knowledge.ipynb b/knowledge.ipynb index 5ab2cda01..c21de646c 100644 --- a/knowledge.ipynb +++ b/knowledge.ipynb @@ -1630,20 +1630,6 @@ "source": [ "print(minimal_consistent_det(conductance, {'Mass', 'Temp', 'Size'}))\n" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": {