-
Notifications
You must be signed in to change notification settings - Fork 4.5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
approximate sentence match logic adapter and its docs #272
Changes from 3 commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,90 @@ | ||
# -*- coding: utf-8 -*- | ||
# Imports | ||
from .base_match import BaseMatchAdapter | ||
import nltk.corpus | ||
import nltk.tokenize.punkt | ||
import nltk.stem.snowball | ||
from nltk.corpus import wordnet | ||
import string | ||
from chatterbot.adapters import Adapter | ||
|
||
class ApproximateSentenceMatchAdapter(BaseMatchAdapter):
    """
    Logic adapter that selects a response by approximate sentence
    matching using the Jaccard similarity index.

    The Jaccard index is the size of the intersection of two token sets
    divided by the size of their union. For example, the sentences
    "The young cat is hungry." and "The cat is very hungry." reduce,
    after stopword removal, to the sets {young, cat, hungry} and
    {cat, very, hungry}. Their intersection {cat, hungry} has size two
    and their union {young, cat, very, hungry} has size four, giving a
    Jaccard similarity of 0.5. With the default threshold of 0.5 these
    two sentences are considered a match.
    """

    def __init__(self, **kwargs):
        super(ApproximateSentenceMatchAdapter, self).__init__(**kwargs)

        # Default English stopwords extended with punctuation and the
        # empty string, so punctuation-only tokens are filtered out.
        self.stopwords = nltk.corpus.stopwords.words('english')
        self.stopwords.extend(string.punctuation)
        self.stopwords.append('')

        self.lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()

    def get_wordnet_pos(self, pos_tag):
        """
        Map a Penn Treebank (token, tag) pair to a (token, WordNet POS)
        pair. Tags that do not map to adjective, verb, noun, or adverb
        default to NOUN, which is also the WordNet lemmatizer's default.
        """
        tag = pos_tag[1]
        if tag.startswith('J'):
            return (pos_tag[0], wordnet.ADJ)
        elif tag.startswith('V'):
            return (pos_tag[0], wordnet.VERB)
        elif tag.startswith('N'):
            return (pos_tag[0], wordnet.NOUN)
        elif tag.startswith('R'):
            return (pos_tag[0], wordnet.ADV)
        else:
            return (pos_tag[0], wordnet.NOUN)

    def _lemma_set(self, text):
        """
        Return the set of lowercased, lemmatized noun tokens in ``text``
        with surrounding punctuation stripped and stopwords removed.
        """
        tagged = map(
            self.get_wordnet_pos,
            nltk.pos_tag(nltk.tokenize.word_tokenize(text))
        )
        lemmas = set()
        for token, pos in tagged:
            # Clean each token once instead of repeating the expression
            # in both the filter and the lemmatizer call.
            cleaned = token.lower().strip(string.punctuation)
            if pos == wordnet.NOUN and cleaned not in self.stopwords:
                lemmas.add(self.lemmatizer.lemmatize(cleaned, pos))
        return lemmas

    def _jaccard_similarity(self, a, b):
        """
        Return the Jaccard similarity of the lemma sets of ``a`` and
        ``b`` as a float between 0 and 1. If both sentences reduce to
        empty lemma sets the union is empty; return 0 rather than
        dividing by zero.
        """
        lemmae_a = self._lemma_set(a)
        lemmae_b = self._lemma_set(b)

        union = lemmae_a | lemmae_b
        if not union:
            # Nothing to compare after stopword removal: no similarity.
            return 0
        return len(lemmae_a & lemmae_b) / float(len(union))

    def is_ci_lemma_stopword_set_match(self, a, b, threshold=0.5):
        """
        Return True if ``a`` and ``b`` are considered matches, meaning
        their Jaccard similarity meets or exceeds ``threshold``.
        """
        return self._jaccard_similarity(a, b) >= threshold

    def get(self, input_statement):
        """
        Takes a statement string and a list of statement strings.
        Returns a (confidence, statement) tuple for the closest matching
        statement from the list, where confidence is the Jaccard
        similarity of the pair — always a value between 0 and 1.
        """
        statement_list = self.context.storage.get_response_statements()

        if not statement_list:
            if self.has_storage_context:
                # Use a randomly picked statement
                return 0, self.context.storage.get_random()
            else:
                raise self.EmptyDatasetException()

        confidence = -1
        closest_match = input_statement

        # Find the known statement with the highest Jaccard similarity
        # to the input statement.
        for statement in statement_list:
            ratio = self._jaccard_similarity(input_statement.text, statement.text)
            if ratio > confidence:
                confidence = ratio
                closest_match = statement

        return confidence, closest_match
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should this be returning There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ratio is a Boolean value since our threshold value is 0.5 which is 50 we make 50 :) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 0.5 would actually be correct here. It appears I never documented it anywhere but confidence values are always between 0 and 1. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yup so I put 0.5 over there :) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This print statement will have to be removed before this gets merged.