From ccec7be03504a5dcb7d109fe4c67ef226d114096 Mon Sep 17 00:00:00 2001
From: Danny Hermes
Date: Wed, 22 Oct 2014 00:15:43 -0700
Subject: [PATCH] Adding support for queries in regression tests.

This is an attempt to port the gcloud-node regression tests for queries
over to gcloud-python. This surfaced some API differences (e.g. issue
#280) and some missing features (projection, offset, and group_by). In
addition, __eq__ was implemented on datastore.key.Key to allow for easy
comparison within tests.
---
 CONTRIBUTING.rst           |  30 ++++
 regression/data/index.yaml |  11 ++
 regression/datastore.py    | 299 ++++++++++++++++++++++++++++++++++---
 3 files changed, 321 insertions(+), 19 deletions(-)
 create mode 100644 regression/data/index.yaml

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index c362cc2e62a6a..799beef44b083 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -141,6 +141,10 @@ Running Regression Tests

       $ python regression/run_regression.py --package {package}

+  This alone will not run the tests. You'll also need to change some
+  local auth settings and some configuration in your project before
+  all of the tests will run.
+
 - Regression tests will be run against an actual project and
   so you'll need to provide some environment variables to facilitate
   authentication to your project:
@@ -149,6 +153,9 @@ Running Regression Tests
   - ``GCLOUD_TESTS_CLIENT_EMAIL``: The email for the service account you're
     authenticating with.
   - ``GCLOUD_TESTS_KEY_FILE``: The path to an encrypted key file.
+    See the private key
+    `docs `__
+    for an explanation of how to get a private key.

 - Examples of these can be found in ``regression/local_test_setup.sample``. We
   recommend copying this to ``regression/local_test_setup``, editing the values
@@ -160,6 +167,29 @@ Running Regression Tests
   absolute) on your system where the key file for your service account can
   be found.

+- For datastore tests, you'll need to create composite
+  `indexes `__
+  with the ``gcloud`` command line
+  `tool `__::
+
+      # Install the app (App Engine Command Line Interface) component.
+      $ gcloud components update app
+
+      # See https://cloud.google.com/sdk/crypto for details on PyOpenSSL and
+      # http://stackoverflow.com/a/25067729/1068170 for why we must persist.
+      $ export CLOUDSDK_PYTHON_SITEPACKAGES=1
+
+      # Authenticate the gcloud tool with your account.
+      $ gcloud auth activate-service-account $GCLOUD_TESTS_CLIENT_EMAIL \
+      >     --key-file=$GCLOUD_TESTS_KEY_FILE
+
+      # Create the indexes.
+      $ gcloud preview datastore create-indexes regression/data/ \
+      >     --project=$GCLOUD_TESTS_DATASET_ID
+
+      # Restore your environment to its previous state.
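+      # (CLOUDSDK_PYTHON_SITEPACKAGES was only needed so the gcloud
+      # tool could use the system PyOpenSSL; see the note above.)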
+      $ unset CLOUDSDK_PYTHON_SITEPACKAGES
+
 Test Coverage
 -------------

diff --git a/regression/data/index.yaml b/regression/data/index.yaml
new file mode 100644
index 0000000000000..f46c999cdd683
--- /dev/null
+++ b/regression/data/index.yaml
@@ -0,0 +1,11 @@
+indexes:
+
+- kind: Character
+  properties:
+  - name: family
+  - name: appearances
+
+- kind: Character
+  properties:
+  - name: name
+  - name: family
diff --git a/regression/datastore.py b/regression/datastore.py
index 46f6b43bbdddb..b9450ef2be005 100644
--- a/regression/datastore.py
+++ b/regression/datastore.py
@@ -10,24 +10,39 @@ class TestDatastore(unittest2.TestCase):

-    def setUp(self):
+    @classmethod
+    def setUpClass(cls):
         environ = regression_utils.get_environ()
-        self._dataset_id = environ['dataset_id']
-        self._client_email = environ['client_email']
-        self._key_filename = environ['key_filename']
-        self._datasets = {}
+        cls._dataset_id = environ['dataset_id']
+        cls._client_email = environ['client_email']
+        cls._key_filename = environ['key_filename']
+        cls._datasets = {}
+
+        cls.suite_entities_to_delete = []
+
+    @classmethod
+    def tearDownClass(cls):
+        with cls._get_dataset().transaction():
+            for entity in cls.suite_entities_to_delete:
+                entity.delete()

-        self.entities_to_delete = []
+    @classmethod
+    def _get_dataset(cls):
+        if cls._dataset_id not in cls._datasets:
+            cls._datasets[cls._dataset_id] = datastore.get_dataset(
+                cls._dataset_id, cls._client_email, cls._key_filename)
+        return cls._datasets[cls._dataset_id]
+
+    def setUp(self):
+        self.case_entities_to_delete = []

     def tearDown(self):
-        for entity in self.entities_to_delete:
-            entity.delete()
+        with self._get_dataset().transaction():
+            for entity in self.case_entities_to_delete:
+                entity.delete()
+

-    def _get_dataset(self):
-        if self._dataset_id not in self._datasets:
-            self._datasets[self._dataset_id] = datastore.get_dataset(
-                self._dataset_id, self._client_email, self._key_filename)
-        return self._datasets[self._dataset_id]
+class TestDatastoreSave(TestDatastore):

     def _get_post(self, name=None, key_id=None, post_content=None):
         post_content = post_content or {
@@ -60,7 +75,7 @@ def _generic_test_post(self, name=None, key_id=None):
         entity.save()

         # Register entity to be deleted.
-        self.entities_to_delete.append(entity)
+        self.case_entities_to_delete.append(entity)

         if name is not None:
             self.assertEqual(entity.key().name(), name)
@@ -68,8 +83,8 @@ def _generic_test_post(self, name=None, key_id=None):
             self.assertEqual(entity.key().id(), key_id)
         retrieved_entity = self._get_dataset().get_entity(entity.key())
         # Check the keys are the same.
-        self.assertEqual(retrieved_entity.key().path(),
-                         entity.key().path())
+        self.assertEqual(retrieved_entity.key(), entity.key())
+
         # Check the data is the same.
         retrieved_dict = dict(retrieved_entity.items())
         entity_dict = dict(entity.items())
@@ -90,7 +105,7 @@ def test_save_multiple(self):
         entity1 = self._get_post()
         entity1.save()
         # Register entity to be deleted.
-        self.entities_to_delete.append(entity1)
+        self.case_entities_to_delete.append(entity1)

         second_post_content = {
             'title': 'How to make the perfect homemade pasta',
@@ -104,12 +119,258 @@ def test_save_multiple(self):
         entity2 = self._get_post(post_content=second_post_content)
         entity2.save()
         # Register entity to be deleted.
-        self.entities_to_delete.append(entity2)
+        self.case_entities_to_delete.append(entity2)

         keys = [entity1.key(), entity2.key()]
         matches = dataset.get_entities(keys)
         self.assertEqual(len(matches), 2)

     def test_empty_kind(self):
-        posts = self._get_dataset().query().kind('Post').limit(2).fetch()
+        posts = self._get_dataset().query(kind='Post').limit(2).fetch()
         self.assertEqual(posts, [])
+
+
+class TestDatastoreQuery(TestDatastore):
+
+    KEY_PATHS = [
+        [{'kind': 'Character', 'name': 'Rickard'}],
+        [{'kind': 'Character', 'name': 'Rickard'},
+         {'kind': 'Character', 'name': 'Eddard'}],
+        [{'kind': 'Character', 'name': 'Catelyn'}],
+        [{'kind': 'Character', 'name': 'Eddard'},
+         {'kind': 'Character', 'name': 'Arya'}],
+        [{'kind': 'Character', 'name': 'Eddard'},
+         {'kind': 'Character', 'name': 'Sansa'}],
+        [{'kind': 'Character', 'name': 'Eddard'},
+         {'kind': 'Character', 'name': 'Robb'}],
+        [{'kind': 'Character', 'name': 'Eddard'},
+         {'kind': 'Character', 'name': 'Bran'}],
+        [{'kind': 'Character', 'name': 'Eddard'},
+         {'kind': 'Character', 'name': 'Jon Snow'}],
+    ]
+    CHARACTERS = [
+        {
+            'name': 'Rickard',
+            'family': 'Stark',
+            'appearances': 0,
+            'alive': False,
+        }, {
+            'name': 'Eddard',
+            'family': 'Stark',
+            'appearances': 9,
+            'alive': False,
+        }, {
+            'name': 'Catelyn',
+            'family': ['Stark', 'Tully'],
+            'appearances': 26,
+            'alive': False,
+        }, {
+            'name': 'Arya',
+            'family': 'Stark',
+            'appearances': 33,
+            'alive': True,
+        }, {
+            'name': 'Sansa',
+            'family': 'Stark',
+            'appearances': 31,
+            'alive': True,
+        }, {
+            'name': 'Robb',
+            'family': 'Stark',
+            'appearances': 22,
+            'alive': False,
+        }, {
+            'name': 'Bran',
+            'family': 'Stark',
+            'appearances': 25,
+            'alive': True,
+        }, {
+            'name': 'Jon Snow',
+            'family': 'Stark',
+            'appearances': 32,
+            'alive': True,
+        },
+    ]
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestDatastoreQuery, cls).setUpClass()
+        dataset = cls._get_dataset()
+        cls.KEYS = [datastore.key.Key(dataset=dataset, path=key_path)
+                    for key_path in cls.KEY_PATHS]
+
+        with dataset.transaction():
+            for key, character in zip(cls.KEYS, cls.CHARACTERS):
+                entity = datastore.entity.Entity(dataset=dataset).key(key)
+                entity.update(character)
+                entity.save()
+                # Register entity to be deleted.
+                cls.suite_entities_to_delete.append(entity)
+
+    def test_keys(self):
+        self.assertEqual(len(self.KEY_PATHS), len(self.CHARACTERS))
+        for key_path, character in zip(self.KEY_PATHS, self.CHARACTERS):
+            self.assertEqual(key_path[-1]['name'], character['name'])
+
+    def test_limit_queries(self):
+        dataset = self._get_dataset()
+        limit = 5
+        query = dataset.query(kind='Character').limit(limit)
+        # Verify there is no cursor before fetch().
+        self.assertRaises(RuntimeError, query.cursor)
+
+        # Fetch characters.
+        character_entities = query.fetch()
+        self.assertEqual(len(character_entities), limit)
+
+        # Check cursor after fetch.
+        cursor = query.cursor()
+        self.assertTrue(cursor is not None)
+
+        # Fetch next batch of characters.
+        new_query = dataset.query(kind='Character').with_cursor(cursor)
+        new_character_entities = new_query.fetch()
+        characters_remaining = len(self.CHARACTERS) - limit
+        self.assertEqual(len(new_character_entities), characters_remaining)
+
+    def test_query_simple_filter(self):
+        query = self._get_dataset().query(kind='Character')
+        query = query.filter('appearances >=', 20)
+        expected_matches = 6
+        # We expect 6, but allow the query to get 1 extra.
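+        # (Fetching one more than expected means the assertion below can
+        # detect over-matching, which a fetch capped at expected_matches
+        # would silently hide.)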
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_query_multiple_filters(self):
+        query = self._get_dataset().query(kind='Character')
+        query = query.filter('appearances >=', 26).filter('family =', 'Stark')
+        expected_matches = 4
+        # We expect 4, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_ancestor_query(self):
+        query = self._get_dataset().query('Character')
+        filtered_query = query.ancestor(['Character', 'Eddard'])
+
+        expected_matches = 5
+        # We expect 5, but allow the query to get 1 extra.
+        entities = filtered_query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_query___key___filter(self):
+        dataset = self._get_dataset()
+        rickard_key = datastore.key.Key(
+            dataset=dataset, path=[{'kind': 'Character', 'name': 'Rickard'}])
+
+        query = dataset.query('Character').filter('__key__ =', rickard_key)
+        expected_matches = 1
+        # We expect 1, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+    def test_ordered_query(self):
+        query = self._get_dataset().query('Character').order('appearances')
+        expected_matches = 8
+        # We expect 8, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        # Actually check the ordered data returned.
+        self.assertEqual(entities[0]['name'], self.CHARACTERS[0]['name'])
+        self.assertEqual(entities[7]['name'], self.CHARACTERS[3]['name'])
+
+    def test_projection_query(self):
+        query = self._get_dataset().query('Character')
+        filtered_query = query.projection(['name', 'family'])
+
+        # NOTE: There are 9 responses because of Catelyn. She has both
+        #       Stark and Tully as her families and hence occurs twice
+        #       in the results.
+        expected_matches = 9
+        # We expect 9, but allow the query to get 1 extra.
+        entities = filtered_query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        arya_entity = entities[0]
+        arya_dict = dict(arya_entity.items())
+        self.assertEqual(arya_dict, {'name': 'Arya', 'family': 'Stark'})
+
+        catelyn_stark_entity = entities[2]
+        catelyn_stark_dict = dict(catelyn_stark_entity.items())
+        self.assertEqual(catelyn_stark_dict,
+                         {'name': 'Catelyn', 'family': 'Stark'})
+
+        catelyn_tully_entity = entities[3]
+        catelyn_tully_dict = dict(catelyn_tully_entity.items())
+        self.assertEqual(catelyn_tully_dict,
+                         {'name': 'Catelyn', 'family': 'Tully'})
+
+        # Check both Catelyn keys are the same.
+        catelyn_stark_key = catelyn_stark_entity.key()
+        catelyn_tully_key = catelyn_tully_entity.key()
+        self.assertEqual(catelyn_stark_key, catelyn_tully_key)
+
+        sansa_entity = entities[8]
+        sansa_dict = dict(sansa_entity.items())
+        self.assertEqual(sansa_dict, {'name': 'Sansa', 'family': 'Stark'})
+
+    def test_query_paginate_with_offset(self):
+        query = self._get_dataset().query('Character')
+        offset = 2
+        limit = 3
+        page_query = query.offset(offset).limit(limit).order('appearances')
+        # Make sure no cursor is set before the fetch.
+        self.assertRaises(RuntimeError, page_query.cursor)
+
+        # Fetch characters.
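+        # Ordered by 'appearances', the offset of 2 skips Rickard (0
+        # appearances) and Eddard (9), so this page starts at Robb (22).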
+        entities = page_query.fetch()
+        self.assertEqual(len(entities), limit)
+        self.assertEqual(entities[0]['name'], 'Robb')
+        self.assertEqual(entities[1]['name'], 'Bran')
+        self.assertEqual(entities[2]['name'], 'Catelyn')
+
+        # Use cursor to begin next query.
+        cursor = page_query.cursor()
+        next_query = page_query.with_cursor(cursor).offset(0)
+        self.assertEqual(next_query.limit(), limit)
+        # Fetch next set of characters.
+        entities = next_query.fetch()
+        self.assertEqual(len(entities), limit)
+        self.assertEqual(entities[0]['name'], 'Sansa')
+        self.assertEqual(entities[1]['name'], 'Jon Snow')
+        self.assertEqual(entities[2]['name'], 'Arya')
+
+    def test_query_paginate_with_start_cursor(self):
+        query = self._get_dataset().query('Character')
+        offset = 2
+        limit = 2
+        page_query = query.offset(offset).limit(limit).order('appearances')
+        # Make sure no cursor is set before the fetch.
+        self.assertRaises(RuntimeError, page_query.cursor)
+
+        # Fetch characters.
+        entities = page_query.fetch()
+        self.assertEqual(len(entities), limit)
+
+        # Use cursor to create a fresh query.
+        cursor = page_query.cursor()
+        fresh_query = self._get_dataset().query('Character')
+        fresh_query = fresh_query.order('appearances').with_cursor(cursor)
+
+        new_entities = fresh_query.fetch()
+        characters_remaining = len(self.CHARACTERS) - limit - offset
+        self.assertEqual(len(new_entities), characters_remaining)
+        self.assertEqual(new_entities[0]['name'], 'Catelyn')
+        self.assertEqual(new_entities[3]['name'], 'Arya')
+
+    def test_query_group_by(self):
+        query = self._get_dataset().query('Character').group_by(['alive'])
+
+        expected_matches = 2
+        # We expect 2, but allow the query to get 1 extra.
+        entities = query.fetch(limit=expected_matches + 1)
+        self.assertEqual(len(entities), expected_matches)
+
+        self.assertEqual(entities[0]['name'], 'Catelyn')
+        self.assertEqual(entities[1]['name'], 'Arya')
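
Note on the ``__eq__`` mentioned in the commit message: the new assertion
``assertEqual(retrieved_entity.key(), entity.key())`` relies on
``datastore.key.Key`` defining equality, but that change is not among the
files shown in this patch. The following is a hypothetical sketch of
path-based key equality, mirroring the old assertion that compared
``entity.key().path()`` values directly; the actual implementation may
differ::

    # Hypothetical sketch -- not part of the patch above.
    class Key(object):

        def __init__(self, dataset=None, path=None):
            self._dataset = dataset
            self._path = path or []

        def path(self):
            return self._path

        def __eq__(self, other):
            # Keys match when they are the same type and describe the
            # same path of kind/name-or-id pairs.
            if not isinstance(other, Key):
                return NotImplemented
            return self.path() == other.path()

        def __ne__(self, other):
            # Python 2 does not derive __ne__ from __eq__, so define both.
            result = self.__eq__(other)
            if result is NotImplemented:
                return result
            return not result

    # Example: two keys built from equal paths compare equal, which is
    # what the tests rely on when comparing retrieved and saved keys.
    assert (Key(path=[{'kind': 'Character', 'name': 'Rickard'}]) ==
            Key(path=[{'kind': 'Character', 'name': 'Rickard'}]))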