diff --git a/images/knowledge_FOIL_grandparent.png b/images/knowledge_FOIL_grandparent.png new file mode 100644 index 000000000..dbc6e7729 Binary files /dev/null and b/images/knowledge_FOIL_grandparent.png differ diff --git a/images/knowledge_foil_family.png b/images/knowledge_foil_family.png new file mode 100644 index 000000000..356f22d8d Binary files /dev/null and b/images/knowledge_foil_family.png differ diff --git a/knowledge.py b/knowledge.py index 2bb12f3b8..cf4915b47 100644 --- a/knowledge.py +++ b/knowledge.py @@ -7,6 +7,7 @@ from itertools import combinations, product from logic import (FolKB, constant_symbols, predicate_symbols, standardize_variables, variables, is_definite_clause, subst, expr, Expr) +from functools import partial # ______________________________________________________________________________ @@ -297,44 +298,59 @@ def new_literals(self, clause): share_vars = variables(clause[0]) for l in clause[1]: share_vars.update(variables(l)) - for pred, arity in self.pred_syms: new_vars = {standardize_variables(expr('x')) for _ in range(arity - 1)} for args in product(share_vars.union(new_vars), repeat=arity): if any(var in share_vars for var in args): - yield Expr(pred, *[var for var in args]) + # make sure we don't return an existing rule + if not Expr(pred, args) in clause[1]: + yield Expr(pred, *[var for var in args]) - def choose_literal(self, literals, examples): - """Choose the best literal based on the information gain.""" - def gain(l): - pre_pos = len(examples[0]) - pre_neg = len(examples[1]) - extended_examples = [sum([list(self.extend_example(example, l)) for example in - examples[i]], []) for i in range(2)] - post_pos = len(extended_examples[0]) - post_neg = len(extended_examples[1]) - if pre_pos + pre_neg == 0 or post_pos + post_neg == 0: - return -1 - # number of positive example that are represented in extended_examples - T = 0 - for example in examples[0]: - def represents(d): - return all(d[x] == example[x] for x in example) - if any(represents(l_) for l_ in extended_examples[0]): - T += 1 + def choose_literal(self, literals, examples): + """Choose the best literal based on the information gain.""" - return T * log((post_pos*(pre_pos + pre_neg) + 1e-4) / ((post_pos + post_neg)*pre_pos)) + return max(literals, key = partial(self.gain , examples = examples)) + + + def gain(self, l ,examples): + """ + Find the utility of each literal when added to the body of the clause. 
+ Utility function is: + gain(R, l) = T * (log_2 (post_pos / (post_pos + post_neg)) - log_2 (pre_pos / (pre_pos + pre_neg))) + + where: + + pre_pos = number of possitive bindings of rule R (=current set of rules) + pre_neg = number of negative bindings of rule R + post_pos = number of possitive bindings of rule R' (= R U {l} ) + post_neg = number of negative bindings of rule R' + T = number of possitive bindings of rule R that are still covered + after adding literal l + + """ + pre_pos = len(examples[0]) + pre_neg = len(examples[1]) + post_pos = sum([list(self.extend_example(example, l)) for example in examples[0]], []) + post_neg = sum([list(self.extend_example(example, l)) for example in examples[1]], []) + if pre_pos + pre_neg ==0 or len(post_pos) + len(post_neg)==0: + return -1 + # number of positive example that are represented in extended_examples + T = 0 + for example in examples[0]: + represents = lambda d: all(d[x] == example[x] for x in example) + if any(represents(l_) for l_ in post_pos): + T += 1 + value = T * (log(len(post_pos) / (len(post_pos) + len(post_neg)) + 1e-12,2) - log(pre_pos / (pre_pos + pre_neg),2)) + return value - return max(literals, key=gain) def update_examples(self, target, examples, extended_examples): """Add to the kb those examples what are represented in extended_examples List of omitted examples is returned.""" uncovered = [] for example in examples: - def represents(d): - return all(d[x] == example[x] for x in example) + represents = lambda d: all(d[x] == example[x] for x in example) if any(represents(l) for l in extended_examples): self.tell(subst(example, target)) else: @@ -400,3 +416,8 @@ def false_positive(e, h): def false_negative(e, h): return e["GOAL"] and not guess_value(e, h) + + + + + diff --git a/knowledge_FOIL.ipynb b/knowledge_FOIL.ipynb new file mode 100644 index 000000000..3755f33f5 --- /dev/null +++ b/knowledge_FOIL.ipynb @@ -0,0 +1,618 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# KNOWLEDGE\n", + "\n", + "The [knowledge](https://github.com/aimacode/aima-python/blob/master/knowledge.py) module covers **Chapter 19: Knowledge in Learning** from Stuart Russel's and Peter Norvig's book *Artificial Intelligence: A Modern Approach*.\n", + "\n", + "Execute the cell below to get started." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from knowledge import *\n", + "\n", + "from notebook import pseudocode, psource" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CONTENTS\n", + "\n", + "* Overview\n", + "* Inductive Logic Programming (FOIL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## OVERVIEW\n", + "\n", + "Like the [learning module](https://github.com/aimacode/aima-python/blob/master/learning.ipynb), this chapter focuses on methods for generating a model/hypothesis for a domain. Unlike though the learning chapter, here we use prior knowledge to help us learn from new experiences and find a proper hypothesis.\n", + "\n", + "### First-Order Logic\n", + "\n", + "Usually knowledge in this field is represented as **first-order logic**, a type of logic that uses variables and quantifiers in logical sentences. Hypotheses are represented by logical sentences with variables, while examples are logical sentences with set values instead of variables. The goal is to assign a value to a special first-order logic predicate, called **goal predicate**, for new examples given a hypothesis. 
We learn this hypothesis by infering knowledge from some given examples.\n", + "\n", + "### Representation\n", + "\n", + "In this module, we use dictionaries to represent examples, with keys the attribute names and values the corresponding example values. Examples also have an extra boolean field, 'GOAL', for the goal predicate. A hypothesis is represented as a list of dictionaries. Each dictionary in that list represents a disjunction. Inside these dictionaries/disjunctions we have conjunctions.\n", + "\n", + "For example, say we want to predict if an animal (cat or dog) will take an umbrella given whether or not it rains or the animal wears a coat. The goal value is 'take an umbrella' and is denoted by the key 'GOAL'. An example:\n", + "\n", + "`{'Species': 'Cat', 'Coat': 'Yes', 'Rain': 'Yes', 'GOAL': True}`\n", + "\n", + "A hypothesis can be the following:\n", + "\n", + "`[{'Species': 'Cat'}]`\n", + "\n", + "which means an animal will take an umbrella if and only if it is a cat.\n", + "\n", + "### Consistency\n", + "\n", + "We say that an example `e` is **consistent** with an hypothesis `h` if the assignment from the hypothesis for `e` is the same as `e['GOAL']`. If the above example and hypothesis are `e` and `h` respectively, then `e` is consistent with `h` since `e['Species'] == 'Cat'`. For `e = {'Species': 'Dog', 'Coat': 'Yes', 'Rain': 'Yes', 'GOAL': True}`, the example is no longer consistent with `h`, since the value assigned to `e` is *False* while `e['GOAL']` is *True*." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Inductive Logic Programming (FOIL)\n", + "\n", + "Inductive logic programming (ILP) combines inductive methods with the power of first-order representations, concentrating in particular on the representation of hypotheses as logic programs. The general knowledge-based induction problem is to solve the entailment constrant:

\n", + "$ Background ∧ Hypothesis ∧ Descriptions \\vDash Classifications $\n", + "\n", + "for the __unknown__ $Hypothesis$, given the $Background$ knowledge described by $Descriptions$ and $Classifications$.\n", + "\n", + "\n", + "\n", + "The first approach to ILP works by starting with a very general rule and gradually specializing\n", + "it so that it fits the data.
\n", + "This is essentially what happens in decision-tree learning, where a\n", + "decision tree is gradually grown until it is consistent with the observations.
To do ILP we\n", + "use first-order literals instead of attributes, and the $Hypothesis$ is a set of clauses (set of first order rules, where each rule is similar to a Horn clause) instead of a decision tree.
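To make that representation concrete: in this module a first-order literal is built with `expr`, and a learned rule is stored as a pair `[consequent, list_of_antecedents]`, which is the format that `new_clause()` constructs and `foil()` returns below. A minimal sketch, assuming the aima-python layout where `expr` is re-exported by the `knowledge` module:

```python
from knowledge import expr

# One learned rule in FOIL's internal format: [consequent, list of antecedent literals],
# here standing for Parent(x, y) <= Mother(x, y)
rule = [expr('Parent(x, y)'), [expr('Mother(x, y)')]]

# foil() returns a list of such rules; together their bodies act as a disjunction
hypothesis = [rule,
              [expr('Parent(x, y)'), [expr('Father(x, y)')]]]
print(hypothesis)
```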
\n", + "\n", + "\n", + "The FOIL algorithm learns new rules, one at a time, in order to cover all given possitive and negative examples.
\n", + "More precicely, FOIL contains an inner and an outer while loop.
\n", + "- __outer loop__: (function __foil()__) add rules untill all positive examples are covered.
\n", + " (each rule is a conjuction of literals, which are chosen inside the inner loop)\n", + " \n", + " \n", + "- __inner loop__: (function __new_clause()__) add new literals untill all negative examples are covered, and some positive examples are covered.
\n", + " - In each iteration, we select/add the most promising literal, according to an estimate of its utility. (function __new_literal()__)
\n", + " \n", + " - The evaluation function to estimate utility of adding literal $L$ to a set of rules $R$ is (function __gain()__) : \n", + " \n", + " $$ FoilGain(L,R) = t \\big( \\log_2{\\frac{p_1}{p_1+n_1}} - \\log_2{\\frac{p_0}{p_0+n_0}} \\big) $$\n", + " where: \n", + " \n", + " $p_0: \\text{is the number of possitive bindings of rule R } \\\\ n_0: \\text{is the number of negative bindings of R} \\\\ p_1: \\text{is the is the number of possitive bindings of rule R'}\\\\ n_0: \\text{is the number of negative bindings of R'}\\\\ t: \\text{is the number of possitive bindings of rule R that are still covered after adding literal L to R}$\n", + " \n", + " - Calculate the extended examples for the chosen literal (function __extend_example()__)
\n", + " (the set of examples created by extending example with each possible constant value for each new variable in literal)\n", + " \n", + "- Finally the algorithm returns a disjunction of first order rules (= conjuction of literals)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "
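To make the gain computation concrete, here is a minimal sketch of the arithmetic that `gain()` performs, using made-up binding counts (these numbers are illustrative only and are not taken from the examples below):

```python
from math import log2

# Suppose the current rule R covers p0 = 12 positive and n0 = 4 negative bindings,
# and after adding literal L the extended rule R' covers p1 = 8 positive and
# n1 = 0 negative bindings, with t = 8 of R's positive bindings still covered.
p0, n0, p1, n1, t = 12, 4, 8, 0, 8

foil_gain = t * (log2(p1 / (p1 + n1)) - log2(p0 / (p0 + n0)))
print(round(foil_gain, 2))  # 8 * (0 - log2(0.75)) = approximately 3.32
```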

\n", + "\n", + "
class FOIL_container(FolKB):\n",
+       "    """Hold the kb and other necessary elements required by FOIL."""\n",
+       "\n",
+       "    def __init__(self, clauses=None):\n",
+       "        self.const_syms = set()\n",
+       "        self.pred_syms = set()\n",
+       "        FolKB.__init__(self, clauses)\n",
+       "\n",
+       "    def tell(self, sentence):\n",
+       "        if is_definite_clause(sentence):\n",
+       "            self.clauses.append(sentence)\n",
+       "            self.const_syms.update(constant_symbols(sentence))\n",
+       "            self.pred_syms.update(predicate_symbols(sentence))\n",
+       "        else:\n",
+       "            raise Exception("Not a definite clause: {}".format(sentence))\n",
+       "\n",
+       "    def foil(self, examples, target):\n",
+       "        """Learn a list of first-order horn clauses\n",
+       "        'examples' is a tuple: (positive_examples, negative_examples).\n",
+       "        positive_examples and negative_examples are both lists which contain substitutions."""\n",
+       "        clauses = []\n",
+       "\n",
+       "        pos_examples = examples[0]\n",
+       "        neg_examples = examples[1]\n",
+       "\n",
+       "        while pos_examples:\n",
+       "            clause, extended_pos_examples = self.new_clause((pos_examples, neg_examples), target)\n",
+       "            # remove positive examples covered by clause\n",
+       "            pos_examples = self.update_examples(target, pos_examples, extended_pos_examples)\n",
+       "            clauses.append(clause)\n",
+       "\n",
+       "        return clauses\n",
+       "\n",
+       "    def new_clause(self, examples, target):\n",
+       "        """Find a horn clause which satisfies part of the positive\n",
+       "        examples but none of the negative examples.\n",
+       "        The horn clause is specified as [consequent, list of antecedents]\n",
+       "        Return value is the tuple (horn_clause, extended_positive_examples)."""\n",
+       "        clause = [target, []]\n",
+       "        # [positive_examples, negative_examples]\n",
+       "        extended_examples = examples\n",
+       "        while extended_examples[1]:\n",
+       "            l = self.choose_literal(self.new_literals(clause), extended_examples)\n",
+       "            clause[1].append(l)\n",
+       "            extended_examples = [sum([list(self.extend_example(example, l)) for example in\n",
+       "                                      extended_examples[i]], []) for i in range(2)]\n",
+       "\n",
+       "        return (clause, extended_examples[0])\n",
+       "\n",
+       "    def extend_example(self, example, literal):\n",
+       "        """Generate extended examples which satisfy the literal."""\n",
+       "        # find all substitutions that satisfy literal\n",
+       "        for s in self.ask_generator(subst(example, literal)):\n",
+       "            s.update(example)\n",
+       "            yield s\n",
+       "\n",
+       "    def new_literals(self, clause):\n",
+       "        """Generate new literals based on known predicate symbols.\n",
+       "        Generated literal must share atleast one variable with clause"""\n",
+       "        share_vars = variables(clause[0])\n",
+       "        for l in clause[1]:\n",
+       "            share_vars.update(variables(l))\n",
+       "        # creates literals with different order every time  \n",
+       "        for pred, arity in self.pred_syms:\n",
+       "            new_vars = {standardize_variables(expr('x')) for _ in range(arity - 1)}\n",
+       "            for args in product(share_vars.union(new_vars), repeat=arity):\n",
+       "                if any(var in share_vars for var in args):\n",
+       "                    # make sure we don't return an existing rule\n",
+       "                    if not Expr(pred, args) in clause[1]:\n",
+       "                        yield Expr(pred, *[var for var in args])\n",
+       "\n",
+       "\n",
+       "    def choose_literal(self, literals, examples): \n",
+       "        """Choose the best literal based on the information gain."""\n",
+       "\n",
+       "        return max(literals, key = partial(self.gain , examples = examples))\n",
+       "\n",
+       "    def gain(self, l ,examples):\n",
+       "        pre_pos= len(examples[0])\n",
+       "        pre_neg= len(examples[1])\n",
+       "        extended_examples = [sum([list(self.extend_example(example, l)) for example in examples[i]], []) for i in range(2)]\n",
+       "        post_pos = len(extended_examples[0])          \n",
+       "        post_neg = len(extended_examples[1]) \n",
+       "        if pre_pos + pre_neg ==0 or post_pos + post_neg==0:\n",
+       "            return -1\n",
+       "        # number of positive example that are represented in extended_examples\n",
+       "        T = 0\n",
+       "        for example in examples[0]:\n",
+       "            def represents(d):\n",
+       "                return all(d[x] == example[x] for x in example)\n",
+       "            if any(represents(l_) for l_ in extended_examples[0]):\n",
+       "                T += 1\n",
+       "        value = T * (log(post_pos / (post_pos + post_neg) + 1e-12,2) - log(pre_pos / (pre_pos + pre_neg),2))\n",
+       "        #print (l, value)\n",
+       "        return value\n",
+       "\n",
+       "\n",
+       "    def update_examples(self, target, examples, extended_examples):\n",
+       "        """Add to the kb those examples what are represented in extended_examples\n",
+       "        List of omitted examples is returned."""\n",
+       "        uncovered = []\n",
+       "        for example in examples:\n",
+       "            def represents(d):\n",
+       "                return all(d[x] == example[x] for x in example)\n",
+       "            if any(represents(l) for l in extended_examples):\n",
+       "                self.tell(subst(example, target))\n",
+       "            else:\n",
+       "                uncovered.append(example)\n",
+       "\n",
+       "        return uncovered\n",
+       "
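A side note on `new_literals` above: the candidate literals are simply every application of a known predicate to the clause's existing variables plus a few fresh ones, filtered so that each candidate shares at least one variable with the clause. A rough standalone sketch of that enumeration, using plain strings instead of the module's `Expr` objects (the variable names here are made up for illustration):

```python
from itertools import product

def candidate_literals(pred_arities, shared_vars, n_new=1):
    """Enumerate pred(v1, ..., vk) over shared and fresh variables,
    keeping only candidates that reuse at least one shared variable."""
    new_vars = ['v{}'.format(i) for i in range(n_new)]
    for pred, arity in pred_arities:
        for args in product(shared_vars + new_vars, repeat=arity):
            if any(a in shared_vars for a in args):
                yield '{}({})'.format(pred, ', '.join(args))

# e.g. candidates for a clause over variables x, y with predicates Mother/2 and Male/1
print(list(candidate_literals([('Mother', 2), ('Male', 1)], ['x', 'y'])))
```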
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "psource(FOIL_container)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example Family \n", + "Suppose we have the following family relations:\n", + "
\n", + "![title](images/knowledge_foil_family.png)\n", + "
\n", + "Given some positive and negative examples of the relation 'Parent(x,y)', we want to find a set of rules that satisfies all the examples.
\n", + "\n", + "A definition of Parent is $Parent(x,y) \\Leftrightarrow Mother(x,y) \\lor Father(x,y)$, which is the result that we expect from the algorithm. " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "A, B, C, D, E, F, G, H, I, x, y, z = map(expr, 'ABCDEFGHIxyz')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "small_family = FOIL_container([expr(\"Mother(Anne, Peter)\"),\n", + " expr(\"Mother(Anne, Zara)\"),\n", + " expr(\"Mother(Sarah, Beatrice)\"),\n", + " expr(\"Mother(Sarah, Eugenie)\"),\n", + " expr(\"Father(Mark, Peter)\"),\n", + " expr(\"Father(Mark, Zara)\"),\n", + " expr(\"Father(Andrew, Beatrice)\"),\n", + " expr(\"Father(Andrew, Eugenie)\"),\n", + " expr(\"Father(Philip, Anne)\"),\n", + " expr(\"Father(Philip, Andrew)\"),\n", + " expr(\"Mother(Elizabeth, Anne)\"),\n", + " expr(\"Mother(Elizabeth, Andrew)\"),\n", + " expr(\"Male(Philip)\"),\n", + " expr(\"Male(Mark)\"),\n", + " expr(\"Male(Andrew)\"),\n", + " expr(\"Male(Peter)\"),\n", + " expr(\"Female(Elizabeth)\"),\n", + " expr(\"Female(Anne)\"),\n", + " expr(\"Female(Sarah)\"),\n", + " expr(\"Female(Zara)\"),\n", + " expr(\"Female(Beatrice)\"),\n", + " expr(\"Female(Eugenie)\"),\n", + "])\n", + "\n", + "target = expr('Parent(x, y)')\n", + "\n", + "examples_pos = [{x: expr('Elizabeth'), y: expr('Anne')},\n", + " {x: expr('Elizabeth'), y: expr('Andrew')},\n", + " {x: expr('Philip'), y: expr('Anne')},\n", + " {x: expr('Philip'), y: expr('Andrew')},\n", + " {x: expr('Anne'), y: expr('Peter')},\n", + " {x: expr('Anne'), y: expr('Zara')},\n", + " {x: expr('Mark'), y: expr('Peter')},\n", + " {x: expr('Mark'), y: expr('Zara')},\n", + " {x: expr('Andrew'), y: expr('Beatrice')},\n", + " {x: expr('Andrew'), y: expr('Eugenie')},\n", + " {x: expr('Sarah'), y: expr('Beatrice')},\n", + " {x: expr('Sarah'), y: expr('Eugenie')}]\n", + "examples_neg = [{x: expr('Anne'), y: expr('Eugenie')},\n", + " {x: expr('Beatrice'), y: expr('Eugenie')},\n", + " {x: expr('Mark'), y: expr('Elizabeth')},\n", + " {x: expr('Beatrice'), y: expr('Philip')}]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[Parent(x, y), [Mother(x, y)]], [Parent(x, y), [Father(x, y)]]]\n" + ] + } + ], + "source": [ + "# run the FOIL algorithm \n", + "clauses = small_family.foil([examples_pos, examples_neg], target)\n", + "print (clauses)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Indeed the algorithm returned the rule: \n", + "
$Parent(x,y) \\Leftrightarrow Mother(x,y) \\lor Father(x,y)$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Suppose that we have some possitive and negative results for the relation 'GrandParent(x,y)' and we want to find a set of rules that satisfies the examples.
\n", + "One possible set of rules for the relation $Grandparent(x,y)$ could be:
\n", + "![title](images/knowledge_FOIL_grandparent.png)\n", + "
\n", + "Or, if $Background$ included the sentence $Parent(x,y) \\Leftrightarrow [Mother(x,y) \\lor Father(x,y)]$ then: \n", + "\n", + "$$Grandparent(x,y) \\Leftrightarrow \\exists \\: z \\quad Parent(x,z) \\land Parent(z,y)$$\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[Grandparent(x, y), [Parent(x, v_5), Parent(v_5, y)]]]\n" + ] + } + ], + "source": [ + "target = expr('Grandparent(x, y)')\n", + "\n", + "examples_pos = [{x: expr('Elizabeth'), y: expr('Peter')},\n", + " {x: expr('Elizabeth'), y: expr('Zara')},\n", + " {x: expr('Elizabeth'), y: expr('Beatrice')},\n", + " {x: expr('Elizabeth'), y: expr('Eugenie')},\n", + " {x: expr('Philip'), y: expr('Peter')},\n", + " {x: expr('Philip'), y: expr('Zara')},\n", + " {x: expr('Philip'), y: expr('Beatrice')},\n", + " {x: expr('Philip'), y: expr('Eugenie')}]\n", + "examples_neg = [{x: expr('Anne'), y: expr('Eugenie')},\n", + " {x: expr('Beatrice'), y: expr('Eugenie')},\n", + " {x: expr('Elizabeth'), y: expr('Andrew')},\n", + " {x: expr('Elizabeth'), y: expr('Anne')},\n", + " {x: expr('Elizabeth'), y: expr('Mark')},\n", + " {x: expr('Elizabeth'), y: expr('Sarah')},\n", + " {x: expr('Philip'), y: expr('Anne')},\n", + " {x: expr('Philip'), y: expr('Andrew')},\n", + " {x: expr('Anne'), y: expr('Peter')},\n", + " {x: expr('Anne'), y: expr('Zara')},\n", + " {x: expr('Mark'), y: expr('Peter')},\n", + " {x: expr('Mark'), y: expr('Zara')},\n", + " {x: expr('Andrew'), y: expr('Beatrice')},\n", + " {x: expr('Andrew'), y: expr('Eugenie')},\n", + " {x: expr('Sarah'), y: expr('Beatrice')},\n", + " {x: expr('Mark'), y: expr('Elizabeth')},\n", + " {x: expr('Beatrice'), y: expr('Philip')}, \n", + " {x: expr('Peter'), y: expr('Andrew')}, \n", + " {x: expr('Zara'), y: expr('Mark')},\n", + " {x: expr('Peter'), y: expr('Anne')},\n", + " {x: expr('Zara'), y: expr('Eugenie')}, ]\n", + "\n", + "clauses = small_family.foil([examples_pos, examples_neg], target)\n", + "\n", + "print(clauses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Indeed the algorithm returned the rule: \n", + "
$Grandparent(x,y) \\Leftrightarrow \\exists \\: v \\: \\: Parent(x,v) \\land Parent(v,y)$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example Network\n", + "\n", + "Suppose that we have the following directed graph and we want to find a rule that describes the reachability between two nodes (Reach(x,y)).
\n", + "Such a rule could be recursive, since y can be reached from x if and only if there is a sequence of adjacent nodes from x to y: \n", + "\n", + "$$ Reach(x,y) \\Leftrightarrow \\begin{cases} \n", + " Conn(x,y), \\: \\text{(if there is a directed edge from x to y)} \\\\\n", + " \\lor \\quad \\exists \\: z \\quad Reach(x,z) \\land Reach(z,y) \\end{cases}$$\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "A H\n", + "|\\ /|\n", + "| \\ / |\n", + "v v v v\n", + "B D-->E-->G-->I\n", + "| / |\n", + "| / |\n", + "vv v\n", + "C F\n", + "\"\"\"\n", + "small_network = FOIL_container([expr(\"Conn(A, B)\"),\n", + " expr(\"Conn(A ,D)\"),\n", + " expr(\"Conn(B, C)\"),\n", + " expr(\"Conn(D, C)\"),\n", + " expr(\"Conn(D, E)\"),\n", + " expr(\"Conn(E ,F)\"),\n", + " expr(\"Conn(E, G)\"),\n", + " expr(\"Conn(G, I)\"),\n", + " expr(\"Conn(H, G)\"),\n", + " expr(\"Conn(H, I)\")])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[Reach(x, y), [Conn(x, y)]], [Reach(x, y), [Reach(x, v_12), Reach(v_14, y), Reach(v_12, v_16), Reach(v_12, y)]], [Reach(x, y), [Reach(x, v_20), Reach(v_20, y)]]]\n" + ] + } + ], + "source": [ + "target = expr('Reach(x, y)')\n", + "examples_pos = [{x: A, y: B},\n", + " {x: A, y: C},\n", + " {x: A, y: D},\n", + " {x: A, y: E},\n", + " {x: A, y: F},\n", + " {x: A, y: G},\n", + " {x: A, y: I},\n", + " {x: B, y: C},\n", + " {x: D, y: C},\n", + " {x: D, y: E},\n", + " {x: D, y: F},\n", + " {x: D, y: G},\n", + " {x: D, y: I},\n", + " {x: E, y: F},\n", + " {x: E, y: G},\n", + " {x: E, y: I},\n", + " {x: G, y: I},\n", + " {x: H, y: G},\n", + " {x: H, y: I}]\n", + "nodes = {A, B, C, D, E, F, G, H, I}\n", + "examples_neg = [example for example in [{x: a, y: b} for a in nodes for b in nodes]\n", + " if example not in examples_pos]\n", + "clauses = small_network.foil([examples_pos, examples_neg], target)\n", + "\n", + "print(clauses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The algorithm produced almost the recursive rule: \n", + " $$ Reach(x,y) \\Leftrightarrow [Conn(x,y)] \\: \\lor \\: [\\exists \\: z \\: \\: Reach(x,z) \\, \\land \\, Reach(z,y)]$$\n", + " \n", + "This is because the size of the example is small. " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/knowledge_current_best.ipynb b/knowledge_current_best.ipynb new file mode 100644 index 000000000..68cb4e0e5 --- /dev/null +++ b/knowledge_current_best.ipynb @@ -0,0 +1,653 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# KNOWLEDGE\n", + "\n", + "The [knowledge](https://github.com/aimacode/aima-python/blob/master/knowledge.py) module covers **Chapter 19: Knowledge in Learning** from Stuart Russel's and Peter Norvig's book *Artificial Intelligence: A Modern Approach*.\n", + "\n", + "Execute the cell below to get started." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from knowledge import *\n", + "\n", + "from notebook import pseudocode, psource" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## CONTENTS\n", + "\n", + "* Overview\n", + "* Current-Best Learning" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## OVERVIEW\n", + "\n", + "Like the [learning module](https://github.com/aimacode/aima-python/blob/master/learning.ipynb), this chapter focuses on methods for generating a model/hypothesis for a domain. Unlike though the learning chapter, here we use prior knowledge to help us learn from new experiences and find a proper hypothesis.\n", + "\n", + "### First-Order Logic\n", + "\n", + "Usually knowledge in this field is represented as **first-order logic**, a type of logic that uses variables and quantifiers in logical sentences. Hypotheses are represented by logical sentences with variables, while examples are logical sentences with set values instead of variables. The goal is to assign a value to a special first-order logic predicate, called **goal predicate**, for new examples given a hypothesis. We learn this hypothesis by infering knowledge from some given examples.\n", + "\n", + "### Representation\n", + "\n", + "In this module, we use dictionaries to represent examples, with keys the attribute names and values the corresponding example values. Examples also have an extra boolean field, 'GOAL', for the goal predicate. A hypothesis is represented as a list of dictionaries. Each dictionary in that list represents a disjunction. Inside these dictionaries/disjunctions we have conjunctions.\n", + "\n", + "For example, say we want to predict if an animal (cat or dog) will take an umbrella given whether or not it rains or the animal wears a coat. The goal value is 'take an umbrella' and is denoted by the key 'GOAL'. An example:\n", + "\n", + "`{'Species': 'Cat', 'Coat': 'Yes', 'Rain': 'Yes', 'GOAL': True}`\n", + "\n", + "A hypothesis can be the following:\n", + "\n", + "`[{'Species': 'Cat'}]`\n", + "\n", + "which means an animal will take an umbrella if and only if it is a cat.\n", + "\n", + "### Consistency\n", + "\n", + "We say that an example `e` is **consistent** with an hypothesis `h` if the assignment from the hypothesis for `e` is the same as `e['GOAL']`. If the above example and hypothesis are `e` and `h` respectively, then `e` is consistent with `h` since `e['Species'] == 'Cat'`. For `e = {'Species': 'Dog', 'Coat': 'Yes', 'Rain': 'Yes', 'GOAL': True}`, the example is no longer consistent with `h`, since the value assigned to `e` is *False* while `e['GOAL']` is *True*." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "## CURRENT-BEST LEARNING\n", + "\n", + "### Overview\n", + "\n", + "In **Current-Best Learning**, we start with a hypothesis and we refine it as we iterate through the examples. For each example, there are three possible outcomes. The example is consistent with the hypothesis, the example is a **false positive** (real value is false but got predicted as true) and **false negative** (real value is true but got predicted as false). 
Depending on the outcome we refine the hypothesis accordingly:\n", + "\n", + "* Consistent: We do not change the hypothesis and we move on to the next example.\n", + "\n", + "* False Positive: We **specialize** the hypothesis, which means we add a conjunction.\n", + "\n", + "* False Negative: We **generalize** the hypothesis, either by removing a conjunction or a disjunction, or by adding a disjunction.\n", + "\n", + "When specializing and generalizing, we should take care to not create inconsistencies with previous examples. To avoid that caveat, backtracking is needed. Thankfully, there is not just one specialization or generalization, so we have a lot to choose from. We will go through all the specialization/generalizations and we will refine our hypothesis as the first specialization/generalization consistent with all the examples seen up to that point." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pseudocode" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "### AIMA3e\n", + "__function__ Current-Best-Learning(_examples_, _h_) __returns__ a hypothesis or fail \n", + " __if__ _examples_ is empty __then__ \n", + "   __return__ _h_ \n", + " _e_ ← First(_examples_) \n", + " __if__ _e_ is consistent with _h_ __then__ \n", + "   __return__ Current-Best-Learning(Rest(_examples_), _h_) \n", + " __else if__ _e_ is a false positive for _h_ __then__ \n", + "   __for each__ _h'_ __in__ specializations of _h_ consistent with _examples_ seen so far __do__ \n", + "     _h''_ ← Current-Best-Learning(Rest(_examples_), _h'_) \n", + "     __if__ _h''_ ≠ _fail_ __then return__ _h''_ \n", + " __else if__ _e_ is a false negative for _h_ __then__ \n", + "   __for each__ _h'_ __in__ generalizations of _h_ consistent with _examples_ seen so far __do__ \n", + "     _h''_ ← Current-Best-Learning(Rest(_examples_), _h'_) \n", + "     __if__ _h''_ ≠ _fail_ __then return__ _h''_ \n", + " __return__ _fail_ \n", + "\n", + "---\n", + "__Figure ??__ The current-best-hypothesis learning algorithm. It searches for a consistent hypothesis that fits all the examples and backtracks when no consistent specialization/generalization can be found. To start the algorithm, any hypothesis can be passed in; it will be specialized or generalized as needed." + ], + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pseudocode('Current-Best-Learning')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Implementation\n", + "\n", + "As mentioned previously, examples are dictionaries (with keys the attribute names) and hypotheses are lists of dictionaries (each dictionary is a disjunction). Also, in the hypothesis, we denote the *NOT* operation with an exclamation mark (!).\n", + "\n", + "We have functions to calculate the list of all specializations/generalizations, to check if an example is consistent/false positive/false negative with a hypothesis. We also have an auxiliary function to add a disjunction (or operation) to a hypothesis, and two other functions to check consistency of all (or just the negative) examples.\n", + "\n", + "You can read the source by running the cell below:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + " \n", + " \n", + " \n", + "\n", + "\n", + "

\n", + "\n", + "
def current_best_learning(examples, h, examples_so_far=None):\n",
+       "    """ [Figure 19.2]\n",
+       "    The hypothesis is a list of dictionaries, with each dictionary representing\n",
+       "    a disjunction."""\n",
+       "    if not examples:\n",
+       "        return h\n",
+       "\n",
+       "    examples_so_far = examples_so_far or []\n",
+       "    e = examples[0]\n",
+       "    if is_consistent(e, h):\n",
+       "        return current_best_learning(examples[1:], h, examples_so_far + [e])\n",
+       "    elif false_positive(e, h):\n",
+       "        for h2 in specializations(examples_so_far + [e], h):\n",
+       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
+       "            if h3 != 'FAIL':\n",
+       "                return h3\n",
+       "    elif false_negative(e, h):\n",
+       "        for h2 in generalizations(examples_so_far + [e], h):\n",
+       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
+       "            if h3 != 'FAIL':\n",
+       "                return h3\n",
+       "\n",
+       "    return 'FAIL'\n",
+       "\n",
+       "\n",
+       "def specializations(examples_so_far, h):\n",
+       "    """Specialize the hypothesis by adding AND operations to the disjunctions"""\n",
+       "    hypotheses = []\n",
+       "\n",
+       "    for i, disj in enumerate(h):\n",
+       "        for e in examples_so_far:\n",
+       "            for k, v in e.items():\n",
+       "                if k in disj or k == 'GOAL':\n",
+       "                    continue\n",
+       "\n",
+       "                h2 = h[i].copy()\n",
+       "                h2[k] = '!' + v\n",
+       "                h3 = h.copy()\n",
+       "                h3[i] = h2\n",
+       "                if check_all_consistency(examples_so_far, h3):\n",
+       "                    hypotheses.append(h3)\n",
+       "\n",
+       "    shuffle(hypotheses)\n",
+       "    return hypotheses\n",
+       "\n",
+       "\n",
+       "def generalizations(examples_so_far, h):\n",
+       "    """Generalize the hypothesis. First delete operations\n",
+       "    (including disjunctions) from the hypothesis. Then, add OR operations."""\n",
+       "    hypotheses = []\n",
+       "\n",
+       "    # Delete disjunctions\n",
+       "    disj_powerset = powerset(range(len(h)))\n",
+       "    for disjs in disj_powerset:\n",
+       "        h2 = h.copy()\n",
+       "        for d in reversed(list(disjs)):\n",
+       "            del h2[d]\n",
+       "\n",
+       "        if check_all_consistency(examples_so_far, h2):\n",
+       "            hypotheses += h2\n",
+       "\n",
+       "    # Delete AND operations in disjunctions\n",
+       "    for i, disj in enumerate(h):\n",
+       "        a_powerset = powerset(disj.keys())\n",
+       "        for attrs in a_powerset:\n",
+       "            h2 = h[i].copy()\n",
+       "            for a in attrs:\n",
+       "                del h2[a]\n",
+       "\n",
+       "            if check_all_consistency(examples_so_far, [h2]):\n",
+       "                h3 = h.copy()\n",
+       "                h3[i] = h2.copy()\n",
+       "                hypotheses += h3\n",
+       "\n",
+       "    # Add OR operations\n",
+       "    if hypotheses == [] or hypotheses == [{}]:\n",
+       "        hypotheses = add_or(examples_so_far, h)\n",
+       "    else:\n",
+       "        hypotheses.extend(add_or(examples_so_far, h))\n",
+       "\n",
+       "    shuffle(hypotheses)\n",
+       "    return hypotheses\n",
+       "
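As a reminder of the hypothesis representation used throughout this notebook: a hypothesis is a list of dictionaries (disjuncts), each mapping attribute names to required values, with a leading `!` marking a forbidden value. The sketch below shows how such a hypothesis can be checked against a single example; it only illustrates the idea behind `guess_value`, it is not the module's own implementation:

```python
def predicts_true(example, hypothesis):
    """True if the example satisfies at least one disjunct of the hypothesis.
    A required value of the form '!v' means the attribute must differ from v."""
    def satisfies(disjunct):
        for attr, val in disjunct.items():
            if val.startswith('!'):
                if example[attr] == val[1:]:
                    return False
            elif example[attr] != val:
                return False
        return True
    return any(satisfies(d) for d in hypothesis)

h = [{'Rain': '!No', 'Species': 'Cat'}, {'Rain': 'Yes', 'Coat': 'Yes'}]
print(predicts_true({'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'No'}, h))  # True
print(predicts_true({'Species': 'Dog', 'Rain': 'No', 'Coat': 'No'}, h))   # False
```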
\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "psource(current_best_learning, specializations, generalizations)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can view the auxiliary functions in the [knowledge module](https://github.com/aimacode/aima-python/blob/master/knowledge.py). A few notes on the functionality of some of the important methods:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* `specializations`: For each disjunction in the hypothesis, it adds a conjunction for values in the examples encountered so far (if the conjunction is consistent with all the examples). It returns a list of hypotheses.\n", + "\n", + "* `generalizations`: It adds to the list of hypotheses in three phases. First it deletes disjunctions, then it deletes conjunctions and finally it adds a disjunction.\n", + "\n", + "* `add_or`: Used by `generalizations` to add an *or operation* (a disjunction) to the hypothesis. Since the last example is the problematic one which wasn't consistent with the hypothesis, it will model the new disjunction to that example. It creates a disjunction for each combination of attributes in the example and returns the new hypotheses consistent with the negative examples encountered so far. We do not need to check the consistency of positive examples, since they are already consistent with at least one other disjunction in the hypotheses' set, so this new disjunction doesn't affect them. In other words, if the value of a positive example is negative under the disjunction, it doesn't matter since we know there exists a disjunction consistent with the example." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since the algorithm stops searching the specializations/generalizations after the first consistent hypothesis is found, usually you will get different results each time you run the code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Examples\n", + "\n", + "We will take a look at two examples. The first is a trivial one, while the second is a bit more complicated (you can also find it in the book).\n", + "\n", + "First we have the \"animals taking umbrellas\" example. Here we want to find a hypothesis to predict whether or not an animal will take an umbrella. The attributes are `Species`, `Rain` and `Coat`. The possible values are `[Cat, Dog]`, `[Yes, No]` and `[Yes, No]` respectively. Below we give seven examples (with `GOAL` we denote whether an animal will take an umbrella or not):" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "animals_umbrellas = [\n", + " {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': True},\n", + " {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True},\n", + " {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True},\n", + " {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': False},\n", + " {'Species': 'Dog', 'Rain': 'No', 'Coat': 'No', 'GOAL': False},\n", + " {'Species': 'Cat', 'Rain': 'No', 'Coat': 'No', 'GOAL': False},\n", + " {'Species': 'Cat', 'Rain': 'No', 'Coat': 'Yes', 'GOAL': True}\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let our initial hypothesis be `[{'Species': 'Cat'}]`. That means every cat will be taking an umbrella. 
We can see that this is not true, but it doesn't matter since we will refine the hypothesis using the Current-Best algorithm. First, let's see how that initial hypothesis fares to have a point of reference." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "False\n", + "False\n", + "False\n", + "True\n", + "True\n" + ] + } + ], + "source": [ + "initial_h = [{'Species': 'Cat'}]\n", + "\n", + "for e in animals_umbrellas:\n", + " print(guess_value(e, initial_h))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We got 5/7 correct. Not terribly bad, but we can do better. Let's run the algorithm and see how that performs." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "True\n", + "True\n", + "False\n", + "False\n", + "False\n", + "True\n" + ] + } + ], + "source": [ + "h = current_best_learning(animals_umbrellas, initial_h)\n", + "\n", + "for e in animals_umbrellas:\n", + " print(guess_value(e, h))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We got everything right! Let's print our hypothesis:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'Rain': '!No', 'Species': 'Cat'}, {'Rain': 'Yes', 'Coat': 'Yes'}, {'Coat': 'Yes', 'Species': 'Cat'}]\n" + ] + } + ], + "source": [ + "print(h)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If an example meets any of the disjunctions in the list, it will be `True`, otherwise it will be `False`.\n", + "\n", + "Let's move on to a bigger example, the \"Restaurant\" example from the book. The attributes for each example are the following:\n", + "\n", + "* Alternative option (`Alt`)\n", + "* Bar to hang out/wait (`Bar`)\n", + "* Day is Friday (`Fri`)\n", + "* Is hungry (`Hun`)\n", + "* How much does it cost (`Price`, takes values in [$, $$, $$$])\n", + "* How many patrons are there (`Pat`, takes values in [None, Some, Full])\n", + "* Is raining (`Rain`)\n", + "* Has made reservation (`Res`)\n", + "* Type of restaurant (`Type`, takes values in [French, Thai, Burger, Italian])\n", + "* Estimated waiting time (`Est`, takes values in [0-10, 10-30, 30-60, >60])\n", + "\n", + "We want to predict if someone will wait or not (Goal = WillWait). Below we show twelve examples found in the book." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![restaurant](images/restaurant.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With the function `r_example` we will build the dictionary examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def r_example(Alt, Bar, Fri, Hun, Pat, Price, Rain, Res, Type, Est, GOAL):\n", + " return {'Alt': Alt, 'Bar': Bar, 'Fri': Fri, 'Hun': Hun, 'Pat': Pat,\n", + " 'Price': Price, 'Rain': Rain, 'Res': Res, 'Type': Type, 'Est': Est,\n", + " 'GOAL': GOAL}" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "In code:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "restaurant = [\n", + " r_example('Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', True),\n", + " r_example('Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', False),\n", + " r_example('No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', True),\n", + " r_example('Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'Yes', 'No', 'Thai', '10-30', True),\n", + " r_example('Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', False),\n", + " r_example('No', 'Yes', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', True),\n", + " r_example('No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', False),\n", + " r_example('No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', True),\n", + " r_example('No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', False),\n", + " r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', False),\n", + " r_example('No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', False),\n", + " r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', True)\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Say our initial hypothesis is that there should be an alternative option and let's run the algorithm." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n", + "False\n", + "True\n", + "True\n", + "False\n", + "True\n", + "False\n", + "True\n", + "False\n", + "False\n", + "False\n", + "True\n" + ] + } + ], + "source": [ + "initial_h = [{'Alt': 'Yes'}]\n", + "h = current_best_learning(restaurant, initial_h)\n", + "for e in restaurant:\n", + " print(guess_value(e, h))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The predictions are correct. 
Let's see the hypothesis that accomplished that:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'Pat': '!Full', 'Alt': 'Yes'}, {'Hun': 'No', 'Res': 'No', 'Rain': 'No', 'Pat': '!None'}, {'Fri': 'Yes', 'Type': 'Thai', 'Bar': 'No'}, {'Fri': 'No', 'Type': 'Italian', 'Bar': 'Yes', 'Alt': 'No', 'Est': '0-10'}, {'Fri': 'No', 'Bar': 'No', 'Est': '0-10', 'Type': 'Thai', 'Rain': 'Yes', 'Alt': 'No'}, {'Fri': 'Yes', 'Bar': 'Yes', 'Est': '30-60', 'Hun': 'Yes', 'Rain': 'No', 'Alt': 'Yes', 'Price': '$'}]\n" + ] + } + ], + "source": [ + "print(h)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It might be quite complicated, with many disjunctions if we are unlucky, but it will always be correct, as long as a correct hypothesis exists." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/knowledge.ipynb b/knowledge_version_space.ipynb similarity index 63% rename from knowledge.ipynb rename to knowledge_version_space.ipynb index 2f4276452..8c8ec29f5 100644 --- a/knowledge.ipynb +++ b/knowledge_version_space.ipynb @@ -29,7 +29,6 @@ "## CONTENTS\n", "\n", "* Overview\n", - "* Current-Best Learning\n", "* Version-Space Learning" ] }, @@ -64,571 +63,6 @@ "We say that an example `e` is **consistent** with an hypothesis `h` if the assignment from the hypothesis for `e` is the same as `e['GOAL']`. If the above example and hypothesis are `e` and `h` respectively, then `e` is consistent with `h` since `e['Species'] == 'Cat'`. For `e = {'Species': 'Dog', 'Coat': 'Yes', 'Rain': 'Yes', 'GOAL': True}`, the example is no longer consistent with `h`, since the value assigned to `e` is *False* while `e['GOAL']` is *True*." ] }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": true - }, - "source": [ - "## CURRENT-BEST LEARNING\n", - "\n", - "### Overview\n", - "\n", - "In **Current-Best Learning**, we start with a hypothesis and we refine it as we iterate through the examples. For each example, there are three possible outcomes. The example is consistent with the hypothesis, the example is a **false positive** (real value is false but got predicted as true) and **false negative** (real value is true but got predicted as false). Depending on the outcome we refine the hypothesis accordingly:\n", - "\n", - "* Consistent: We do not change the hypothesis and we move on to the next example.\n", - "\n", - "* False Positive: We **specialize** the hypothesis, which means we add a conjunction.\n", - "\n", - "* False Negative: We **generalize** the hypothesis, either by removing a conjunction or a disjunction, or by adding a disjunction.\n", - "\n", - "When specializing and generalizing, we should take care to not create inconsistencies with previous examples. To avoid that caveat, backtracking is needed. Thankfully, there is not just one specialization or generalization, so we have a lot to choose from. We will go through all the specialization/generalizations and we will refine our hypothesis as the first specialization/generalization consistent with all the examples seen up to that point." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Pseudocode" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "### AIMA3e\n", - "__function__ Current-Best-Learning(_examples_, _h_) __returns__ a hypothesis or fail \n", - " __if__ _examples_ is empty __then__ \n", - "   __return__ _h_ \n", - " _e_ ← First(_examples_) \n", - " __if__ _e_ is consistent with _h_ __then__ \n", - "   __return__ Current-Best-Learning(Rest(_examples_), _h_) \n", - " __else if__ _e_ is a false positive for _h_ __then__ \n", - "   __for each__ _h'_ __in__ specializations of _h_ consistent with _examples_ seen so far __do__ \n", - "     _h''_ ← Current-Best-Learning(Rest(_examples_), _h'_) \n", - "     __if__ _h''_ ≠ _fail_ __then return__ _h''_ \n", - " __else if__ _e_ is a false negative for _h_ __then__ \n", - "   __for each__ _h'_ __in__ generalizations of _h_ consistent with _examples_ seen so far __do__ \n", - "     _h''_ ← Current-Best-Learning(Rest(_examples_), _h'_) \n", - "     __if__ _h''_ ≠ _fail_ __then return__ _h''_ \n", - " __return__ _fail_ \n", - "\n", - "---\n", - "__Figure ??__ The current-best-hypothesis learning algorithm. It searches for a consistent hypothesis that fits all the examples and backtracks when no consistent specialization/generalization can be found. To start the algorithm, any hypothesis can be passed in; it will be specialized or generalized as needed." - ], - "text/plain": [ - "" - ] - }, - "execution_count": 51, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "pseudocode('Current-Best-Learning')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Implementation\n", - "\n", - "As mentioned previously, examples are dictionaries (with keys the attribute names) and hypotheses are lists of dictionaries (each dictionary is a disjunction). Also, in the hypothesis, we denote the *NOT* operation with an exclamation mark (!).\n", - "\n", - "We have functions to calculate the list of all specializations/generalizations, to check if an example is consistent/false positive/false negative with a hypothesis. We also have an auxiliary function to add a disjunction (or operation) to a hypothesis, and two other functions to check consistency of all (or just the negative) examples.\n", - "\n", - "You can read the source by running the cell below:" - ] - }, - { - "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - "\n", - "\n", - " \n", - " \n", - " \n", - "\n", - "\n", - "

\n", - "\n", - "
def current_best_learning(examples, h, examples_so_far=None):\n",
-       "    """ [Figure 19.2]\n",
-       "    The hypothesis is a list of dictionaries, with each dictionary representing\n",
-       "    a disjunction."""\n",
-       "    if not examples:\n",
-       "        return h\n",
-       "\n",
-       "    examples_so_far = examples_so_far or []\n",
-       "    e = examples[0]\n",
-       "    if is_consistent(e, h):\n",
-       "        return current_best_learning(examples[1:], h, examples_so_far + [e])\n",
-       "    elif false_positive(e, h):\n",
-       "        for h2 in specializations(examples_so_far + [e], h):\n",
-       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
-       "            if h3 != 'FAIL':\n",
-       "                return h3\n",
-       "    elif false_negative(e, h):\n",
-       "        for h2 in generalizations(examples_so_far + [e], h):\n",
-       "            h3 = current_best_learning(examples[1:], h2, examples_so_far + [e])\n",
-       "            if h3 != 'FAIL':\n",
-       "                return h3\n",
-       "\n",
-       "    return 'FAIL'\n",
-       "\n",
-       "\n",
-       "def specializations(examples_so_far, h):\n",
-       "    """Specialize the hypothesis by adding AND operations to the disjunctions"""\n",
-       "    hypotheses = []\n",
-       "\n",
-       "    for i, disj in enumerate(h):\n",
-       "        for e in examples_so_far:\n",
-       "            for k, v in e.items():\n",
-       "                if k in disj or k == 'GOAL':\n",
-       "                    continue\n",
-       "\n",
-       "                h2 = h[i].copy()\n",
-       "                h2[k] = '!' + v\n",
-       "                h3 = h.copy()\n",
-       "                h3[i] = h2\n",
-       "                if check_all_consistency(examples_so_far, h3):\n",
-       "                    hypotheses.append(h3)\n",
-       "\n",
-       "    shuffle(hypotheses)\n",
-       "    return hypotheses\n",
-       "\n",
-       "\n",
-       "def generalizations(examples_so_far, h):\n",
-       "    """Generalize the hypothesis. First delete operations\n",
-       "    (including disjunctions) from the hypothesis. Then, add OR operations."""\n",
-       "    hypotheses = []\n",
-       "\n",
-       "    # Delete disjunctions\n",
-       "    disj_powerset = powerset(range(len(h)))\n",
-       "    for disjs in disj_powerset:\n",
-       "        h2 = h.copy()\n",
-       "        for d in reversed(list(disjs)):\n",
-       "            del h2[d]\n",
-       "\n",
-       "        if check_all_consistency(examples_so_far, h2):\n",
-       "            hypotheses += h2\n",
-       "\n",
-       "    # Delete AND operations in disjunctions\n",
-       "    for i, disj in enumerate(h):\n",
-       "        a_powerset = powerset(disj.keys())\n",
-       "        for attrs in a_powerset:\n",
-       "            h2 = h[i].copy()\n",
-       "            for a in attrs:\n",
-       "                del h2[a]\n",
-       "\n",
-       "            if check_all_consistency(examples_so_far, [h2]):\n",
-       "                h3 = h.copy()\n",
-       "                h3[i] = h2.copy()\n",
-       "                hypotheses += h3\n",
-       "\n",
-       "    # Add OR operations\n",
-       "    if hypotheses == [] or hypotheses == [{}]:\n",
-       "        hypotheses = add_or(examples_so_far, h)\n",
-       "    else:\n",
-       "        hypotheses.extend(add_or(examples_so_far, h))\n",
-       "\n",
-       "    shuffle(hypotheses)\n",
-       "    return hypotheses\n",
-       "
\n", - "\n", - "\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "psource(current_best_learning, specializations, generalizations)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can view the auxiliary functions in the [knowledge module](https://github.com/aimacode/aima-python/blob/master/knowledge.py). A few notes on the functionality of some of the important methods:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* `specializations`: For each disjunction in the hypothesis, it adds a conjunction for values in the examples encountered so far (if the conjunction is consistent with all the examples). It returns a list of hypotheses.\n", - "\n", - "* `generalizations`: It adds to the list of hypotheses in three phases. First it deletes disjunctions, then it deletes conjunctions and finally it adds a disjunction.\n", - "\n", - "* `add_or`: Used by `generalizations` to add an *or operation* (a disjunction) to the hypothesis. Since the last example is the problematic one which wasn't consistent with the hypothesis, it will model the new disjunction to that example. It creates a disjunction for each combination of attributes in the example and returns the new hypotheses consistent with the negative examples encountered so far. We do not need to check the consistency of positive examples, since they are already consistent with at least one other disjunction in the hypotheses' set, so this new disjunction doesn't affect them. In other words, if the value of a positive example is negative under the disjunction, it doesn't matter since we know there exists a disjunction consistent with the example." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since the algorithm stops searching the specializations/generalizations after the first consistent hypothesis is found, usually you will get different results each time you run the code." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Examples\n", - "\n", - "We will take a look at two examples. The first is a trivial one, while the second is a bit more complicated (you can also find it in the book).\n", - "\n", - "First we have the \"animals taking umbrellas\" example. Here we want to find a hypothesis to predict whether or not an animal will take an umbrella. The attributes are `Species`, `Rain` and `Coat`. The possible values are `[Cat, Dog]`, `[Yes, No]` and `[Yes, No]` respectively. Below we give seven examples (with `GOAL` we denote whether an animal will take an umbrella or not):" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [], - "source": [ - "animals_umbrellas = [\n", - " {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': True},\n", - " {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True},\n", - " {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True},\n", - " {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': False},\n", - " {'Species': 'Dog', 'Rain': 'No', 'Coat': 'No', 'GOAL': False},\n", - " {'Species': 'Cat', 'Rain': 'No', 'Coat': 'No', 'GOAL': False},\n", - " {'Species': 'Cat', 'Rain': 'No', 'Coat': 'Yes', 'GOAL': True}\n", - "]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let our initial hypothesis be `[{'Species': 'Cat'}]`. That means every cat will be taking an umbrella. 
We can see that this is not true, but it doesn't matter since we will refine the hypothesis using the Current-Best algorithm. First, let's see how that initial hypothesis fares to have a point of reference." - ] - }, - { - "cell_type": "code", - "execution_count": 54, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "False\n", - "False\n", - "False\n", - "True\n", - "True\n" - ] - } - ], - "source": [ - "initial_h = [{'Species': 'Cat'}]\n", - "\n", - "for e in animals_umbrellas:\n", - " print(guess_value(e, initial_h))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We got 5/7 correct. Not terribly bad, but we can do better. Let's run the algorithm and see how that performs." - ] - }, - { - "cell_type": "code", - "execution_count": 55, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "True\n", - "False\n", - "False\n", - "False\n", - "True\n" - ] - } - ], - "source": [ - "h = current_best_learning(animals_umbrellas, initial_h)\n", - "\n", - "for e in animals_umbrellas:\n", - " print(guess_value(e, h))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We got everything right! Let's print our hypothesis:" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'Species': 'Cat', 'Rain': '!No'}, {'Rain': 'Yes', 'Coat': '!No'}, {'Rain': 'No', 'Coat': 'Yes'}]\n" - ] - } - ], - "source": [ - "print(h)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If an example meets any of the disjunctions in the list, it will be `True`, otherwise it will be `False`.\n", - "\n", - "Let's move on to a bigger example, the \"Restaurant\" example from the book. The attributes for each example are the following:\n", - "\n", - "* Alternative option (`Alt`)\n", - "* Bar to hang out/wait (`Bar`)\n", - "* Day is Friday (`Fri`)\n", - "* Is hungry (`Hun`)\n", - "* How much does it cost (`Price`, takes values in [$, $$, $$$])\n", - "* How many patrons are there (`Pat`, takes values in [None, Some, Full])\n", - "* Is raining (`Rain`)\n", - "* Has made reservation (`Res`)\n", - "* Type of restaurant (`Type`, takes values in [French, Thai, Burger, Italian])\n", - "* Estimated waiting time (`Est`, takes values in [0-10, 10-30, 30-60, >60])\n", - "\n", - "We want to predict if someone will wait or not (Goal = WillWait). Below we show twelve examples found in the book." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![restaurant](images/restaurant.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the function `r_example` we will build the dictionary examples:" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "def r_example(Alt, Bar, Fri, Hun, Pat, Price, Rain, Res, Type, Est, GOAL):\n", - " return {'Alt': Alt, 'Bar': Bar, 'Fri': Fri, 'Hun': Hun, 'Pat': Pat,\n", - " 'Price': Price, 'Rain': Rain, 'Res': Res, 'Type': Type, 'Est': Est,\n", - " 'GOAL': GOAL}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": true - }, - "source": [ - "In code:" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [], - "source": [ - "restaurant = [\n", - " r_example('Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', True),\n", - " r_example('Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', False),\n", - " r_example('No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', True),\n", - " r_example('Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'Yes', 'No', 'Thai', '10-30', True),\n", - " r_example('Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', False),\n", - " r_example('No', 'Yes', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', True),\n", - " r_example('No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', False),\n", - " r_example('No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', True),\n", - " r_example('No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', False),\n", - " r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', False),\n", - " r_example('No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', False),\n", - " r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', True)\n", - "]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Say our initial hypothesis is that there should be an alternative option and let's run the algorithm." - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "False\n", - "True\n", - "True\n", - "False\n", - "True\n", - "False\n", - "True\n", - "False\n", - "False\n", - "False\n", - "True\n" - ] - } - ], - "source": [ - "initial_h = [{'Alt': 'Yes'}]\n", - "h = current_best_learning(restaurant, initial_h)\n", - "for e in restaurant:\n", - " print(guess_value(e, h))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The predictions are correct. 
Let's see the hypothesis that accomplished that:" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'Alt': 'Yes', 'Type': '!Thai', 'Hun': '!No', 'Pat': '!Full'}, {'Alt': 'No', 'Bar': 'Yes', 'Hun': 'No', 'Price': '$', 'Rain': 'No', 'Res': 'No'}, {'Pat': 'Full', 'Price': '$', 'Rain': 'Yes', 'Type': '!Burger'}, {'Price': '$$', 'Type': 'Italian'}, {'Bar': 'No', 'Hun': 'Yes', 'Pat': 'Some', 'Price': '$$', 'Rain': 'Yes', 'Res': 'Yes', 'Type': 'Thai', 'Est': '0-10'}, {'Bar': 'Yes', 'Fri': 'Yes', 'Hun': 'Yes', 'Pat': 'Full', 'Rain': 'No', 'Res': 'No', 'Type': 'Burger'}]\n" - ] - } - ], - "source": [ - "print(h)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It might be quite complicated, with many disjunctions if we are unlucky, but it will always be correct, as long as a correct hypothesis exists." - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1646,7 +1080,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.4" + "version": "3.5.3" } }, "nbformat": 4, diff --git a/tests/test_knowledge.py b/tests/test_knowledge.py index 89fe479a0..ab86089ae 100644 --- a/tests/test_knowledge.py +++ b/tests/test_knowledge.py @@ -5,6 +5,56 @@ random.seed("aima-python") + +party = [ + {'Pizza': 'Yes', 'Soda': 'No', 'GOAL': True}, + {'Pizza': 'Yes', 'Soda': 'Yes', 'GOAL': True}, + {'Pizza': 'No', 'Soda': 'No', 'GOAL': False} +] + +animals_umbrellas = [ + {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': True}, + {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True}, + {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True}, + {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': False}, + {'Species': 'Dog', 'Rain': 'No', 'Coat': 'No', 'GOAL': False}, + {'Species': 'Cat', 'Rain': 'No', 'Coat': 'No', 'GOAL': False}, + {'Species': 'Cat', 'Rain': 'No', 'Coat': 'Yes', 'GOAL': True} +] + +conductance = [ + {'Sample': 'S1', 'Mass': 12, 'Temp': 26, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.59}, + {'Sample': 'S1', 'Mass': 12, 'Temp': 100, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.57}, + {'Sample': 'S2', 'Mass': 24, 'Temp': 26, 'Material': 'Cu', 'Size': 6, 'GOAL': 0.59}, + {'Sample': 'S3', 'Mass': 12, 'Temp': 26, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.05}, + {'Sample': 'S3', 'Mass': 12, 'Temp': 100, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.04}, + {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04}, + {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04}, + {'Sample': 'S5', 'Mass': 24, 'Temp': 100, 'Material': 'Pb', 'Size': 4, 'GOAL': 0.04}, + {'Sample': 'S6', 'Mass': 36, 'Temp': 26, 'Material': 'Pb', 'Size': 6, 'GOAL': 0.05}, +] + +def r_example(Alt, Bar, Fri, Hun, Pat, Price, Rain, Res, Type, Est, GOAL): + return {'Alt': Alt, 'Bar': Bar, 'Fri': Fri, 'Hun': Hun, 'Pat': Pat, + 'Price': Price, 'Rain': Rain, 'Res': Res, 'Type': Type, 'Est': Est, + 'GOAL': GOAL} + +restaurant = [ + r_example('Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', True), + r_example('Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', False), + r_example('No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', True), + r_example('Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'Yes', 'No', 'Thai', '10-30', True), + r_example('Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', False), + r_example('No', 'Yes', 'No', 'Yes', 
'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', True),
+    r_example('No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', False),
+    r_example('No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', True),
+    r_example('No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', False),
+    r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', False),
+    r_example('No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', False),
+    r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', True)
+]
+
+
 def test_current_best_learning():
     examples = restaurant
     hypothesis = [{'Alt': 'Yes'}]
@@ -58,108 +108,153 @@ def test_minimal_consistent_det():
     assert minimal_consistent_det(conductance, {'Mass', 'Temp', 'Size'}) == {'Mass', 'Temp', 'Size'}
+A, B, C, D, E, F, G, H, I, x, y, z = map(expr, 'ABCDEFGHIxyz')
+
+# knowledge base containing family relations
+small_family = FOIL_container([expr("Mother(Anne, Peter)"),
+                               expr("Mother(Anne, Zara)"),
+                               expr("Mother(Sarah, Beatrice)"),
+                               expr("Mother(Sarah, Eugenie)"),
+                               expr("Father(Mark, Peter)"),
+                               expr("Father(Mark, Zara)"),
+                               expr("Father(Andrew, Beatrice)"),
+                               expr("Father(Andrew, Eugenie)"),
+                               expr("Father(Philip, Anne)"),
+                               expr("Father(Philip, Andrew)"),
+                               expr("Mother(Elizabeth, Anne)"),
+                               expr("Mother(Elizabeth, Andrew)"),
+                               expr("Male(Philip)"),
+                               expr("Male(Mark)"),
+                               expr("Male(Andrew)"),
+                               expr("Male(Peter)"),
+                               expr("Female(Elizabeth)"),
+                               expr("Female(Anne)"),
+                               expr("Female(Sarah)"),
+                               expr("Female(Zara)"),
+                               expr("Female(Beatrice)"),
+                               expr("Female(Eugenie)"),
+])
+
+smaller_family = FOIL_container([expr("Mother(Anne, Peter)"),
+                                 expr("Father(Mark, Peter)"),
+                                 expr("Father(Philip, Anne)"),
+                                 expr("Mother(Elizabeth, Anne)"),
+                                 expr("Male(Philip)"),
+                                 expr("Male(Mark)"),
+                                 expr("Male(Peter)"),
+                                 expr("Female(Elizabeth)"),
+                                 expr("Female(Anne)")
+                                 ])
+
+
+# target relation
+target = expr('Parent(x, y)')
+
+# positive examples of target
+examples_pos = [{x: expr('Elizabeth'), y: expr('Anne')},
+                {x: expr('Elizabeth'), y: expr('Andrew')},
+                {x: expr('Philip'), y: expr('Anne')},
+                {x: expr('Philip'), y: expr('Andrew')},
+                {x: expr('Anne'), y: expr('Peter')},
+                {x: expr('Anne'), y: expr('Zara')},
+                {x: expr('Mark'), y: expr('Peter')},
+                {x: expr('Mark'), y: expr('Zara')},
+                {x: expr('Andrew'), y: expr('Beatrice')},
+                {x: expr('Andrew'), y: expr('Eugenie')},
+                {x: expr('Sarah'), y: expr('Beatrice')},
+                {x: expr('Sarah'), y: expr('Eugenie')}]
+
+# negative examples of target
+examples_neg = [{x: expr('Anne'), y: expr('Eugenie')},
+                {x: expr('Beatrice'), y: expr('Eugenie')},
+                {x: expr('Mark'), y: expr('Elizabeth')},
+                {x: expr('Beatrice'), y: expr('Philip')}]
+
+
+
+def test_tell():
+    """
+    Adds a sentence to the knowledge base.
+    """
+    smaller_family.tell(expr("Male(George)"))
+    smaller_family.tell(expr("Female(Mum)"))
+    assert smaller_family.ask(expr("Male(George)")) == {}
+    assert smaller_family.ask(expr("Female(Mum)")) == {}
+    assert not smaller_family.ask(expr("Female(George)"))
+    assert not smaller_family.ask(expr("Male(Mum)"))
+
 def test_extend_example():
-    assert list(test_network.extend_example({x: A, y: B}, expr('Conn(x, z)'))) == [
-        {x: A, y: B, z: B}, {x: A, y: B, z: D}]
-    assert list(test_network.extend_example({x: G}, expr('Conn(x, y)'))) == [{x: G, y: I}]
-    assert list(test_network.extend_example({x: C}, expr('Conn(x, y)'))) == []
-    assert len(list(test_network.extend_example({}, expr('Conn(x, y)')))) == 10
+    """
+    Create the extended examples for the given literal.
+    (The extended examples are a set of examples created by extending the example
+    with each possible constant value for each new variable in the literal.)
+    """
     assert len(list(small_family.extend_example({x: expr('Andrew')}, expr('Father(x, y)')))) == 2
     assert len(list(small_family.extend_example({x: expr('Andrew')}, expr('Mother(x, y)')))) == 0
     assert len(list(small_family.extend_example({x: expr('Andrew')}, expr('Female(y)')))) == 6
 def test_new_literals():
-    assert len(list(test_network.new_literals([expr('p | q'), [expr('p')]]))) == 8
-    assert len(list(test_network.new_literals([expr('p'), [expr('q'), expr('p | r')]]))) == 15
     assert len(list(small_family.new_literals([expr('p'), []]))) == 8
     assert len(list(small_family.new_literals([expr('p & q'), []]))) == 20
+def test_new_clause():
+    """
+    Find the best clause to add to the set of clauses.
+    """
+    clause = small_family.new_clause([examples_pos, examples_neg], target)[0][1]
+    assert len(clause) == 1 and clause[0].op in ['Male', 'Female', 'Father', 'Mother']
+
 def test_choose_literal():
-    literals = [expr('Conn(p, q)'), expr('Conn(x, z)'), expr('Conn(r, s)'), expr('Conn(t, y)')]
-    examples_pos = [{x: A, y: B}, {x: A, y: D}]
-    examples_neg = [{x: A, y: C}, {x: C, y: A}, {x: C, y: B}, {x: A, y: I}]
-    assert test_network.choose_literal(literals, [examples_pos, examples_neg]) == expr('Conn(x, z)')
-    literals = [expr('Conn(x, p)'), expr('Conn(p, x)'), expr('Conn(p, q)')]
-    examples_pos = [{x: C}, {x: F}, {x: I}]
-    examples_neg = [{x: D}, {x: A}, {x: B}, {x: G}]
-    assert test_network.choose_literal(literals, [examples_pos, examples_neg]) == expr('Conn(p, x)')
-    literals = [expr('Father(x, y)'), expr('Father(y, x)'), expr('Mother(x, y)'), expr('Mother(x, y)')]
+    """
+    Choose the best literal based on the information gain.
+    """
+    literals = [expr('Father(x, y)'), expr('Father(x, y)'), expr('Mother(x, y)'), expr('Mother(x, y)')]
     examples_pos = [{x: expr('Philip')}, {x: expr('Mark')}, {x: expr('Peter')}]
     examples_neg = [{x: expr('Elizabeth')}, {x: expr('Sarah')}]
     assert small_family.choose_literal(literals, [examples_pos, examples_neg]) == expr('Father(x, y)')
     literals = [expr('Father(x, y)'), expr('Father(y, x)'), expr('Male(x)')]
     examples_pos = [{x: expr('Philip')}, {x: expr('Mark')}, {x: expr('Andrew')}]
     examples_neg = [{x: expr('Elizabeth')}, {x: expr('Sarah')}]
-    assert small_family.choose_literal(literals, [examples_pos, examples_neg]) == expr('Male(x)')
+    assert small_family.choose_literal(literals, [examples_pos, examples_neg]) == expr('Father(x, y)')
-def test_new_clause():
-    target = expr('Open(x, y)')
-    examples_pos = [{x: B}, {x: A}, {x: G}]
-    examples_neg = [{x: C}, {x: F}, {x: I}]
-    clause = test_network.new_clause([examples_pos, examples_neg], target)[0][1]
-    assert len(clause) == 1 and clause[0].op == 'Conn' and clause[0].args[0] == x
-    target = expr('Flow(x, y)')
-    examples_pos = [{x: B}, {x: D}, {x: E}, {x: G}]
-    examples_neg = [{x: A}, {x: C}, {x: F}, {x: I}, {x: H}]
-    clause = test_network.new_clause([examples_pos, examples_neg], target)[0][1]
-    assert len(clause) == 2 and \
-        ((clause[0].args[0] == x and clause[1].args[1] == x) or \
-        (clause[0].args[1] == x and clause[1].args[0] == x))
+def test_gain():
+    """
+    Calculate the utility of each literal, based on the information gained.
+    """
+    gain_father = small_family.gain(expr('Father(x, y)'), [examples_pos, examples_neg])
+    gain_male = small_family.gain(expr('Male(x)'), [examples_pos, examples_neg])
+    assert round(gain_father, 2) == 2.49
+    assert round(gain_male, 2) == 1.16
+
+def test_update_examples():
+    """Add to the kb those examples that are represented in extended_examples.
+    A list of omitted examples is returned.
+    """
+    extended_examples = [{x: expr("Mark"), y: expr("Peter")},
+                         {x: expr("Philip"), y: expr("Anne")}]
+
+    uncovered = smaller_family.update_examples(target, examples_pos, extended_examples)
+    assert {x: expr("Elizabeth"), y: expr("Anne")} in uncovered
+    assert {x: expr("Anne"), y: expr("Peter")} in uncovered
+    assert {x: expr("Philip"), y: expr("Anne")} not in uncovered
+    assert {x: expr("Mark"), y: expr("Peter")} not in uncovered
+
 def test_foil():
-    target = expr('Reach(x, y)')
-    examples_pos = [{x: A, y: B},
-                    {x: A, y: C},
-                    {x: A, y: D},
-                    {x: A, y: E},
-                    {x: A, y: F},
-                    {x: A, y: G},
-                    {x: A, y: I},
-                    {x: B, y: C},
-                    {x: D, y: C},
-                    {x: D, y: E},
-                    {x: D, y: F},
-                    {x: D, y: G},
-                    {x: D, y: I},
-                    {x: E, y: F},
-                    {x: E, y: G},
-                    {x: E, y: I},
-                    {x: G, y: I},
-                    {x: H, y: G},
-                    {x: H, y: I}]
-    nodes = {A, B, C, D, E, F, G, H, I}
-    examples_neg = [example for example in [{x: a, y: b} for a in nodes for b in nodes]
-                    if example not in examples_pos]
-    ## TODO: Modify FOIL to recursively check for satisfied positive examples
-#    clauses = test_network.foil([examples_pos, examples_neg], target)
-#    assert len(clauses) == 2
-    target = expr('Parent(x, y)')
-    examples_pos = [{x: expr('Elizabeth'), y: expr('Anne')},
-                    {x: expr('Elizabeth'), y: expr('Andrew')},
-                    {x: expr('Philip'), y: expr('Anne')},
-                    {x: expr('Philip'), y: expr('Andrew')},
-                    {x: expr('Anne'), y: expr('Peter')},
-                    {x: expr('Anne'), y: expr('Zara')},
-                    {x: expr('Mark'), y: expr('Peter')},
-                    {x: expr('Mark'), y: expr('Zara')},
-                    {x: expr('Andrew'), y: expr('Beatrice')},
-                    {x: expr('Andrew'), y: expr('Eugenie')},
-                    {x: expr('Sarah'), y: expr('Beatrice')},
-                    {x: expr('Sarah'), y: expr('Eugenie')}]
-    examples_neg = [{x: expr('Anne'), y: expr('Eugenie')},
-                    {x: expr('Beatrice'), y: expr('Eugenie')},
-                    {x: expr('Mark'), y: expr('Elizabeth')},
-                    {x: expr('Beatrice'), y: expr('Philip')}]
+    """
+    Test the FOIL algorithm when the target is Parent(x, y).
+    """
     clauses = small_family.foil([examples_pos, examples_neg], target)
     assert len(clauses) == 2 and \
         ((clauses[0][1][0] == expr('Father(x, y)') and clauses[1][1][0] == expr('Mother(x, y)')) or \
         (clauses[1][1][0] == expr('Father(x, y)') and clauses[0][1][0] == expr('Mother(x, y)')))
-    target = expr('Grandparent(x, y)')
-    examples_pos = [{x: expr('Elizabeth'), y: expr('Peter')},
+
+    target_g = expr('Grandparent(x, y)')
+    examples_pos_g = [{x: expr('Elizabeth'), y: expr('Peter')},
                     {x: expr('Elizabeth'), y: expr('Zara')},
                     {x: expr('Elizabeth'), y: expr('Beatrice')},
                     {x: expr('Elizabeth'), y: expr('Eugenie')},
@@ -167,9 +262,12 @@
                    {x: expr('Philip'), y: expr('Zara')},
                    {x: expr('Philip'), y: expr('Beatrice')},
                    {x: expr('Philip'), y: expr('Eugenie')}]
-    examples_neg = [{x: expr('Anne'), y: expr('Eugenie')},
+    examples_neg_g = [{x: expr('Anne'), y: expr('Eugenie')},
                    {x: expr('Beatrice'), y: expr('Eugenie')},
                    {x: expr('Elizabeth'), y: expr('Andrew')},
+                   {x: expr('Elizabeth'), y: expr('Anne')},
+                   {x: expr('Elizabeth'), y: expr('Mark')},
+                   {x: expr('Elizabeth'), y: expr('Sarah')},
                    {x: expr('Philip'), y: expr('Anne')},
                    {x: expr('Philip'), y: expr('Andrew')},
                    {x: expr('Anne'), y: expr('Peter')},
@@ -180,105 +278,15 @@ def test_foil(): {x: expr('Andrew'), y: expr('Eugenie')}, {x: expr('Sarah'), y: expr('Beatrice')}, {x: expr('Mark'), y: expr('Elizabeth')}, - {x: expr('Beatrice'), y: expr('Philip')}] -# clauses = small_family.foil([examples_pos, examples_neg], target) -# assert len(clauses) == 2 and \ -# ((clauses[0][1][0] == expr('Father(x, y)') and clauses[1][1][0] == expr('Mother(x, y)')) or \ -# (clauses[1][1][0] == expr('Father(x, y)') and clauses[0][1][0] == expr('Mother(x, y)'))) - - -party = [ - {'Pizza': 'Yes', 'Soda': 'No', 'GOAL': True}, - {'Pizza': 'Yes', 'Soda': 'Yes', 'GOAL': True}, - {'Pizza': 'No', 'Soda': 'No', 'GOAL': False} -] - -animals_umbrellas = [ - {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': True}, - {'Species': 'Cat', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True}, - {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'Yes', 'GOAL': True}, - {'Species': 'Dog', 'Rain': 'Yes', 'Coat': 'No', 'GOAL': False}, - {'Species': 'Dog', 'Rain': 'No', 'Coat': 'No', 'GOAL': False}, - {'Species': 'Cat', 'Rain': 'No', 'Coat': 'No', 'GOAL': False}, - {'Species': 'Cat', 'Rain': 'No', 'Coat': 'Yes', 'GOAL': True} -] - -conductance = [ - {'Sample': 'S1', 'Mass': 12, 'Temp': 26, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.59}, - {'Sample': 'S1', 'Mass': 12, 'Temp': 100, 'Material': 'Cu', 'Size': 3, 'GOAL': 0.57}, - {'Sample': 'S2', 'Mass': 24, 'Temp': 26, 'Material': 'Cu', 'Size': 6, 'GOAL': 0.59}, - {'Sample': 'S3', 'Mass': 12, 'Temp': 26, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.05}, - {'Sample': 'S3', 'Mass': 12, 'Temp': 100, 'Material': 'Pb', 'Size': 2, 'GOAL': 0.04}, - {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04}, - {'Sample': 'S4', 'Mass': 18, 'Temp': 100, 'Material': 'Pb', 'Size': 3, 'GOAL': 0.04}, - {'Sample': 'S5', 'Mass': 24, 'Temp': 100, 'Material': 'Pb', 'Size': 4, 'GOAL': 0.04}, - {'Sample': 'S6', 'Mass': 36, 'Temp': 26, 'Material': 'Pb', 'Size': 6, 'GOAL': 0.05}, -] - -def r_example(Alt, Bar, Fri, Hun, Pat, Price, Rain, Res, Type, Est, GOAL): - return {'Alt': Alt, 'Bar': Bar, 'Fri': Fri, 'Hun': Hun, 'Pat': Pat, - 'Price': Price, 'Rain': Rain, 'Res': Res, 'Type': Type, 'Est': Est, - 'GOAL': GOAL} - -restaurant = [ - r_example('Yes', 'No', 'No', 'Yes', 'Some', '$$$', 'No', 'Yes', 'French', '0-10', True), - r_example('Yes', 'No', 'No', 'Yes', 'Full', '$', 'No', 'No', 'Thai', '30-60', False), - r_example('No', 'Yes', 'No', 'No', 'Some', '$', 'No', 'No', 'Burger', '0-10', True), - r_example('Yes', 'No', 'Yes', 'Yes', 'Full', '$', 'Yes', 'No', 'Thai', '10-30', True), - r_example('Yes', 'No', 'Yes', 'No', 'Full', '$$$', 'No', 'Yes', 'French', '>60', False), - r_example('No', 'Yes', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Italian', '0-10', True), - r_example('No', 'Yes', 'No', 'No', 'None', '$', 'Yes', 'No', 'Burger', '0-10', False), - r_example('No', 'No', 'No', 'Yes', 'Some', '$$', 'Yes', 'Yes', 'Thai', '0-10', True), - r_example('No', 'Yes', 'Yes', 'No', 'Full', '$', 'Yes', 'No', 'Burger', '>60', False), - r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$$$', 'No', 'Yes', 'Italian', '10-30', False), - r_example('No', 'No', 'No', 'No', 'None', '$', 'No', 'No', 'Thai', '0-10', False), - r_example('Yes', 'Yes', 'Yes', 'Yes', 'Full', '$', 'No', 'No', 'Burger', '30-60', True) -] - -""" -A H -|\ /| -| \ / | -v v v v -B D-->E-->G-->I -| / | -| / | -vv v -C F -""" -test_network = FOIL_container([expr("Conn(A, B)"), - expr("Conn(A ,D)"), - expr("Conn(B, C)"), - expr("Conn(D, C)"), - expr("Conn(D, E)"), - expr("Conn(E ,F)"), - expr("Conn(E, G)"), 
- expr("Conn(G, I)"), - expr("Conn(H, G)"), - expr("Conn(H, I)")]) - -small_family = FOIL_container([expr("Mother(Anne, Peter)"), - expr("Mother(Anne, Zara)"), - expr("Mother(Sarah, Beatrice)"), - expr("Mother(Sarah, Eugenie)"), - expr("Father(Mark, Peter)"), - expr("Father(Mark, Zara)"), - expr("Father(Andrew, Beatrice)"), - expr("Father(Andrew, Eugenie)"), - expr("Father(Philip, Anne)"), - expr("Father(Philip, Andrew)"), - expr("Mother(Elizabeth, Anne)"), - expr("Mother(Elizabeth, Andrew)"), - expr("Male(Philip)"), - expr("Male(Mark)"), - expr("Male(Andrew)"), - expr("Male(Peter)"), - expr("Female(Elizabeth)"), - expr("Female(Anne)"), - expr("Female(Sarah)"), - expr("Female(Zara)"), - expr("Female(Beatrice)"), - expr("Female(Eugenie)"), -]) - -A, B, C, D, E, F, G, H, I, x, y, z = map(expr, 'ABCDEFGHIxyz') + {x: expr('Beatrice'), y: expr('Philip')}, + {x: expr('Peter'), y: expr('Andrew')}, + {x: expr('Zara'), y: expr('Mark')}, + {x: expr('Peter'), y: expr('Anne')}, + {x: expr('Zara'), y: expr('Eugenie')}] + + clauses = small_family.foil([examples_pos_g, examples_neg_g], target_g) + assert len(clauses[0]) == 2 + assert clauses[0][1][0].op == 'Parent' + assert clauses[0][1][0].args[0] == x + assert clauses[0][1][1].op == 'Parent' + assert clauses[0][1][1].args[1] == y
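
For readers who want to poke at these tests interactively, the short sketch below (not part of the diff above) shows one way to display the clauses returned by FOIL_container.foil. It assumes the (head, body) clause structure that the assertions in test_foil rely on; the helper name show_clauses is hypothetical and not part of the module.

    def show_clauses(clauses):
        # Each learned clause is assumed to be a (head, body) pair, where body is a
        # list of literals, the same indexing the assertions in test_foil use.
        for head, body in clauses:
            print(' & '.join(str(literal) for literal in body), '==>', head)

    # For the Grandparent target above, the learned clause should print roughly as
    #   Parent(x, v_1) & Parent(v_1, y) ==> Grandparent(x, y)
    # show_clauses(small_family.foil([examples_pos_g, examples_neg_g], target_g))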