Skip to content

Commit

Permalink
Makes entities for query tests persist to avoid flakiness.
Browse files Browse the repository at this point in the history
This came about from conversations with @silvolu and
@pcostell. Unfortunately, even if an entity has been stored
and indexes built for 10+ minutes, the tests

- test_query_simple_filter
- test_query_multiple_filters

are still flaky.
  • Loading branch information
dhermes committed Oct 23, 2014
1 parent 5259631 commit 972a796
Show file tree
Hide file tree
Showing 4 changed files with 127 additions and 118 deletions.
5 changes: 5 additions & 0 deletions CONTRIBUTING.rst
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,11 @@ Running Regression Tests
# Restore your environment to its previous state.
$ unset CLOUDSDK_PYTHON_SITEPACKAGES

- For datastore query tests, you'll need stored data in your dataset.
To populate this data, run::

$ python regression/populate_datastore.py

Test Coverage
-------------

Expand Down
140 changes: 22 additions & 118 deletions regression/datastore.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,39 +5,21 @@
from gcloud import datastore
# This assumes the command is being run via tox hence the
# repository root is the current directory.
from regression.populate_datastore import CHARACTERS
from regression import regression_utils


class TestDatastore(unittest2.TestCase):

@classmethod
def setUpClass(cls):
environ = regression_utils.get_environ()
cls._dataset_id = environ['dataset_id']
cls._client_email = environ['client_email']
cls._key_filename = environ['key_filename']
cls._datasets = {}

cls.suite_entities_to_delete = []

@classmethod
def tearDownClass(cls):
with cls._get_dataset().transaction():
for entity in cls.suite_entities_to_delete:
entity.delete()

@classmethod
def _get_dataset(cls):
if cls._dataset_id not in cls._datasets:
cls._datasets[cls._dataset_id] = datastore.get_dataset(
cls._dataset_id, cls._client_email, cls._key_filename)
return cls._datasets[cls._dataset_id]
cls.dataset = regression_utils.get_dataset()

def setUp(self):
self.case_entities_to_delete = []

def tearDown(self):
with self._get_dataset().transaction():
with self.dataset.transaction():
for entity in self.case_entities_to_delete:
entity.delete()

Expand All @@ -55,8 +37,7 @@ def _get_post(self, name=None, key_id=None, post_content=None):
'rating': 5.0,
}
# Create an entity with the given content in our dataset.
dataset = self._get_dataset()
entity = dataset.entity(kind='Post')
entity = self.dataset.entity(kind='Post')
entity.update(post_content)

# Update the entity key.
Expand All @@ -81,7 +62,7 @@ def _generic_test_post(self, name=None, key_id=None):
self.assertEqual(entity.key().name(), name)
if key_id is not None:
self.assertEqual(entity.key().id(), key_id)
retrieved_entity = self._get_dataset().get_entity(entity.key())
retrieved_entity = self.dataset.get_entity(entity.key())
# Check the keys are the same.
self.assertEqual(retrieved_entity.key(), entity.key())

Expand All @@ -100,8 +81,7 @@ def test_post_with_generated_id(self):
self._generic_test_post()

def test_save_multiple(self):
dataset = self._get_dataset()
with dataset.transaction():
with self.dataset.transaction():
entity1 = self._get_post()
entity1.save()
# Register entity to be deleted.
Expand All @@ -122,100 +102,24 @@ def test_save_multiple(self):
self.case_entities_to_delete.append(entity2)

keys = [entity1.key(), entity2.key()]
matches = dataset.get_entities(keys)
matches = self.dataset.get_entities(keys)
self.assertEqual(len(matches), 2)

def test_empty_kind(self):
posts = self._get_dataset().query(kind='Post').limit(2).fetch()
posts = self.dataset.query('Post').limit(2).fetch()
self.assertEqual(posts, [])


class TestDatastoreQuery(TestDatastore):

KEY_PATHS = [
[{'kind': 'Character', 'name': 'Rickard'}],
[{'kind': 'Character', 'name': 'Rickard'},
{'kind': 'Character', 'name': 'Eddard'}],
[{'kind': 'Character', 'name': 'Catelyn'}],
[{'kind': 'Character', 'name': 'Eddard'},
{'kind': 'Character', 'name': 'Arya'}],
[{'kind': 'Character', 'name': 'Eddard'},
{'kind': 'Character', 'name': 'Sansa'}],
[{'kind': 'Character', 'name': 'Eddard'},
{'kind': 'Character', 'name': 'Robb'}],
[{'kind': 'Character', 'name': 'Eddard'},
{'kind': 'Character', 'name': 'Bran'}],
[{'kind': 'Character', 'name': 'Eddard'},
{'kind': 'Character', 'name': 'Jon Snow'}],
]
CHARACTERS = [
{
'name': 'Rickard',
'family': 'Stark',
'appearances': 0,
'alive': False,
}, {
'name': 'Eddard',
'family': 'Stark',
'appearances': 9,
'alive': False,
}, {
'name': 'Catelyn',
'family': ['Stark', 'Tully'],
'appearances': 26,
'alive': False,
}, {
'name': 'Arya',
'family': 'Stark',
'appearances': 33,
'alive': True,
}, {
'name': 'Sansa',
'family': 'Stark',
'appearances': 31,
'alive': True,
}, {
'name': 'Robb',
'family': 'Stark',
'appearances': 22,
'alive': False,
}, {
'name': 'Bran',
'family': 'Stark',
'appearances': 25,
'alive': True,
}, {
'name': 'Jon Snow',
'family': 'Stark',
'appearances': 32,
'alive': True,
},
]

@classmethod
def setUpClass(cls):
super(TestDatastoreQuery, cls).setUpClass()
dataset = cls._get_dataset()
cls.KEYS = [datastore.key.Key(path=key_path)
for key_path in cls.KEY_PATHS]

with dataset.transaction():
for key, character in zip(cls.KEYS, cls.CHARACTERS):
entity = datastore.entity.Entity(dataset=dataset).key(key)
entity.update(character)
entity.save()
# Register entity to be deleted.
cls.suite_entities_to_delete.append(entity)

def test_keys(self):
self.assertEqual(len(self.KEY_PATHS), len(self.CHARACTERS))
for key_path, character in zip(self.KEY_PATHS, self.CHARACTERS):
self.assertEqual(key_path[-1]['name'], character['name'])
cls.CHARACTERS = CHARACTERS

def test_limit_queries(self):
dataset = self._get_dataset()
limit = 5
query = dataset.query(kind='Character').limit(limit)
query = self.dataset.query('Character').limit(limit)
# Verify there is no cursor before fetch().
self.assertRaises(RuntimeError, query.cursor)

Expand All @@ -228,29 +132,29 @@ def test_limit_queries(self):
self.assertTrue(cursor is not None)

# Fetch next batch of characters.
new_query = dataset.query(kind='Character').with_cursor(cursor)
new_query = self.dataset.query('Character').with_cursor(cursor)
new_character_entities = new_query.fetch()
characters_remaining = len(self.CHARACTERS) - limit
self.assertEqual(len(new_character_entities), characters_remaining)

def test_query_simple_filter(self):
query = self._get_dataset().query(kind='Character')
query = self.dataset.query('Character')
query = query.filter('appearances >=', 20)
expected_matches = 6
# We expect 6, but allow the query to get 1 extra.
entities = query.fetch(limit=expected_matches + 1)
self.assertEqual(len(entities), expected_matches)

def test_query_multiple_filters(self):
query = self._get_dataset().query(kind='Character')
query = self.dataset.query('Character')
query = query.filter('appearances >=', 26).filter('family =', 'Stark')
expected_matches = 4
# We expect 4, but allow the query to get 1 extra.
entities = query.fetch(limit=expected_matches + 1)
self.assertEqual(len(entities), expected_matches)

def test_ancestor_query(self):
query = self._get_dataset().query('Character')
query = self.dataset.query('Character')
filtered_query = query.ancestor(['Character', 'Eddard'])

expected_matches = 5
Expand All @@ -259,18 +163,18 @@ def test_ancestor_query(self):
self.assertEqual(len(entities), expected_matches)

def test_query___key___filter(self):
dataset = self._get_dataset()
rickard_key = datastore.key.Key(
path=[{'kind': 'Character', 'name': 'Rickard'}])

query = dataset.query('Character').filter('__key__ =', rickard_key)
query = self.dataset.query('Character').filter(
'__key__ =', rickard_key)
expected_matches = 1
# We expect 1, but allow the query to get 1 extra.
entities = query.fetch(limit=expected_matches + 1)
self.assertEqual(len(entities), expected_matches)

def test_ordered_query(self):
query = self._get_dataset().query('Character').order('appearances')
query = self.dataset.query('Character').order('appearances')
expected_matches = 8
# We expect 8, but allow the query to get 1 extra.
entities = query.fetch(limit=expected_matches + 1)
Expand All @@ -281,7 +185,7 @@ def test_ordered_query(self):
self.assertEqual(entities[7]['name'], self.CHARACTERS[3]['name'])

def test_projection_query(self):
query = self._get_dataset().query('Character')
query = self.dataset.query('Character')
filtered_query = query.projection(['name', 'family'])

# NOTE: There are 9 responses because of Catelyn. She has both
Expand Down Expand Up @@ -316,7 +220,7 @@ def test_projection_query(self):
self.assertEqual(sansa_dict, {'name': 'Sansa', 'family': 'Stark'})

def test_query_paginate_with_offset(self):
query = self._get_dataset().query('Character')
query = self.dataset.query('Character')
offset = 2
limit = 3
page_query = query.offset(offset).limit(limit).order('appearances')
Expand All @@ -342,7 +246,7 @@ def test_query_paginate_with_offset(self):
self.assertEqual(entities[2]['name'], 'Arya')

def test_query_paginate_with_start_cursor(self):
query = self._get_dataset().query('Character')
query = self.dataset.query('Character')
offset = 2
limit = 2
page_query = query.offset(offset).limit(limit).order('appearances')
Expand All @@ -355,7 +259,7 @@ def test_query_paginate_with_start_cursor(self):

# Use cursor to create a fresh query.
cursor = page_query.cursor()
fresh_query = self._get_dataset().query('Character')
fresh_query = self.dataset.query('Character')
fresh_query = fresh_query.order('appearances').with_cursor(cursor)

new_entities = fresh_query.fetch()
Expand All @@ -365,7 +269,7 @@ def test_query_paginate_with_start_cursor(self):
self.assertEqual(new_entities[3]['name'], 'Arya')

def test_query_group_by(self):
query = self._get_dataset().query('Character').group_by(['alive'])
query = self.dataset.query('Character').group_by(['alive'])

expected_matches = 2
# We expect 2, but allow the query to get 1 extra.
Expand Down
87 changes: 87 additions & 0 deletions regression/populate_datastore.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
"""Script to populate datastore with regression test data."""


from gcloud import datastore
# This assumes the command is being run via tox hence the
# repository root is the current directory.
from regression import regression_utils


# Ancestor key paths for the Character fixture entities.  Children are
# stored under their father's key (Eddard under Rickard, Eddard's
# children under Eddard) so the regression tests can exercise real
# ancestor queries.  The last element of each path must name the same
# character as the corresponding CHARACTERS entry; add_characters()
# validates this pairing before saving.
KEY_PATHS = [
    [{'kind': 'Character', 'name': 'Rickard'}],
    [{'kind': 'Character', 'name': 'Rickard'},
     {'kind': 'Character', 'name': 'Eddard'}],
    [{'kind': 'Character', 'name': 'Catelyn'}],
    [{'kind': 'Character', 'name': 'Eddard'},
     {'kind': 'Character', 'name': 'Arya'}],
    [{'kind': 'Character', 'name': 'Eddard'},
     {'kind': 'Character', 'name': 'Sansa'}],
    [{'kind': 'Character', 'name': 'Eddard'},
     {'kind': 'Character', 'name': 'Robb'}],
    [{'kind': 'Character', 'name': 'Eddard'},
     {'kind': 'Character', 'name': 'Bran'}],
    [{'kind': 'Character', 'name': 'Eddard'},
     {'kind': 'Character', 'name': 'Jon Snow'}],
]
# Property payloads for the fixture entities, in the same order as
# KEY_PATHS.  NOTE: Catelyn's 'family' is a list of two values, which
# the query tests rely on (a projection over 'family' returns an extra
# result for her).
CHARACTERS = [
    {
        'name': 'Rickard',
        'family': 'Stark',
        'appearances': 0,
        'alive': False,
    }, {
        'name': 'Eddard',
        'family': 'Stark',
        'appearances': 9,
        'alive': False,
    }, {
        'name': 'Catelyn',
        'family': ['Stark', 'Tully'],
        'appearances': 26,
        'alive': False,
    }, {
        'name': 'Arya',
        'family': 'Stark',
        'appearances': 33,
        'alive': True,
    }, {
        'name': 'Sansa',
        'family': 'Stark',
        'appearances': 31,
        'alive': True,
    }, {
        'name': 'Robb',
        'family': 'Stark',
        'appearances': 22,
        'alive': False,
    }, {
        'name': 'Bran',
        'family': 'Stark',
        'appearances': 25,
        'alive': True,
    }, {
        'name': 'Jon Snow',
        'family': 'Stark',
        'appearances': 32,
        'alive': True,
    },
]


def add_characters():
    """Store the Character fixture entities in the regression dataset.

    Saves every KEY_PATHS/CHARACTERS pair inside a single transaction so
    the datastore query regression tests have stable, pre-indexed data
    to run against.

    :raises ValueError: if a key path's final name does not match the
        'name' of the character data at the same index (guards against
        the two fixture lists drifting out of sync).
    """
    dataset = regression_utils.get_dataset()
    with dataset.transaction():
        for key_path, character in zip(KEY_PATHS, CHARACTERS):
            # Fail fast on fixture drift before writing anything.
            if key_path[-1]['name'] != character['name']:
                raise ValueError(('Character and key don\'t agree',
                                  key_path, character))
            key = datastore.key.Key(path=key_path)
            entity = datastore.entity.Entity(dataset=dataset).key(key)
            entity.update(character)
            entity.save()
            # Parenthesized single-argument form prints identically on
            # Python 2 and also works on Python 3 (the bare print
            # statement was Python-2-only).
            print('Adding Character %s %s' % (character['name'],
                                              character['family']))


if __name__ == '__main__':
    add_characters()
Loading

0 comments on commit 972a796

Please sign in to comment.