Enabling tests for python 3, fixing all python 3 incompatibilities. #74

Merged · 1 commit · Sep 2, 2015
1 change: 1 addition & 0 deletions .gitignore
@@ -6,3 +6,4 @@ coverage-gae.json
 coverage.xml
 nosetests.xml
 python-docs-samples.json
+__pycache__
16 changes: 16 additions & 0 deletions README.md
@@ -13,6 +13,9 @@ For more detailed introduction to a product, check the README in the corresponding
 
 ## Testing
 
+The tests in this repository run against live services; therefore, it takes a bit
+of configuration to run all of the tests locally.
+
 ### Local setup
 
 Before you can run tests locally you must have:
@@ -26,7 +29,9 @@ Before you can run tests locally you must have:
     $ curl https://sdk.cloud.google.com | bash
 
 * Most tests require you to have an active, billing-enabled project on the [Google Developers Console](https://console.developers.google.com).
+
 * You will need a set of [Service Account Credentials](https://console.developers.google.com/project/_/apiui/credential) for your project in ``json`` form.
+
 * Set the environment variables appropriately for your project.
 
     $ export GOOGLE_APPLICATION_CREDENTIALS=your-service-account-json-file
@@ -43,6 +48,17 @@ If you want to run the Google App Engine tests, you will need:
 
     $ export GAE_PYTHONPATH=~/google-cloud-sdk/platform/google_appengine
 
+To run the bigquery tests, you'll need to create a bigquery dataset:
+
+* Create a dataset in your project named `test_dataset`.
+* Create a table named `test_table2`, upload ``tests/resources/data.csv`` and give it the following schema:
+
+        Name STRING
+        Age INTEGER
+        Weight FLOAT
+        IsMagic BOOLEAN
+
+
 ### Test environments
 
 We use [tox](https://tox.readthedocs.org/en/latest/) to configure multiple python environments:
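For reference, the dataset and table the README asks for can be scripted with the same google-api-python-client stack these samples use. A hedged sketch, not part of this PR; `your-project-id` is a placeholder, and loading ``tests/resources/data.csv`` into the table is still a separate load job (for example via the load_data_from_csv.py sample below, after staging the file in Cloud Storage):

```python
# Sketch only: create the test_dataset dataset and test_table2 table that
# the repository's bigquery tests expect.
import httplib2
from apiclient import discovery
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
bigquery = discovery.build(
    'bigquery', 'v2', http=credentials.authorize(httplib2.Http()))

project_id = 'your-project-id'  # placeholder

bigquery.datasets().insert(
    projectId=project_id,
    body={'datasetReference': {'projectId': project_id,
                               'datasetId': 'test_dataset'}}).execute()

bigquery.tables().insert(
    projectId=project_id,
    datasetId='test_dataset',
    body={'tableReference': {'projectId': project_id,
                             'datasetId': 'test_dataset',
                             'tableId': 'test_table2'},
          'schema': {'fields': [
              {'name': 'Name', 'type': 'STRING'},
              {'name': 'Age', 'type': 'INTEGER'},
              {'name': 'Weight', 'type': 'FLOAT'},
              {'name': 'IsMagic', 'type': 'BOOLEAN'}]}}).execute()
```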
13 changes: 6 additions & 7 deletions bigquery/samples/async_query.py
@@ -11,14 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function  # For python 2/3 interoperability
-
 import json
 import uuid
 
 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import paging
 from bigquery.samples.utils import poll_job
+from six.moves import input
 
 
 # [START async_query]
@@ -70,13 +69,13 @@ def run(project_id, query_string, batch, num_retries, interval):
 
 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    batch = raw_input("Run query as batch (y/n)?: ") in (
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    batch = input("Run query as batch (y/n)?: ") in (
         'True', 'true', 'y', 'Y', 'yes', 'Yes')
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")
 
     for result in run(project_id, query_string, batch, num_retries, interval):
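The raw_input→input swap above is the core pattern repeated in every sample in this PR. A minimal illustration of why the six.moves alias works on both interpreters (not part of the PR):

```python
# six.moves.input is raw_input on Python 2 and the builtin input on
# Python 3, so the prompt always returns a plain string. Python 2's bare
# input() would instead eval() whatever the user typed.
from six.moves import input

batch = input("Run query as batch (y/n)?: ") in (
    'True', 'true', 'y', 'Y', 'yes', 'Yes')
print(batch)
```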
15 changes: 8 additions & 7 deletions bigquery/samples/export_data_to_cloud_storage.py
@@ -15,6 +15,7 @@
 
 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import poll_job
+from six.moves import input
 
 
 # [START export_table]
@@ -82,19 +83,19 @@ def run(cloud_storage_path,
 
 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a table name to copy: ")
-    cloud_storage_path = raw_input(
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a table name to copy: ")
+    cloud_storage_path = input(
         "Enter a Google Cloud Storage URI: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the job (in seconds): ")
-    num_retries = raw_input(
+    num_retries = input(
         "Enter the number of retries in case of 500 error: ")
 
     run(cloud_storage_path,
         projectId, datasetId, tableId,
         num_retries, interval)
 
-    print 'Done exporting!'
+    print('Done exporting!')
 # [END main]
7 changes: 3 additions & 4 deletions bigquery/samples/list_datasets_projects.py
@@ -31,19 +31,18 @@
 where <project-id> is the id of the developers console [3] project you'd like
 to list the bigquery datasets and projects for.
 
-[1] https://developers.google.com/identity/protocols/application-default-credentials#howtheywork
+[1] https://developers.google.com/identity/protocols/\
+application-default-credentials#howtheywork
 [2] https://cloud.google.com/sdk/
 [3] https://console.developers.google.com
 """  # NOQA
 
 import argparse
 from pprint import pprint
 
-from urllib2 import HTTPError
-
 from apiclient import discovery
-
 from oauth2client.client import GoogleCredentials
+from six.moves.urllib.error import HTTPError
 
 
 # [START list_datasets]
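The six.moves.urllib package mirrors Python 3's urllib layout on both interpreters, which is what lets Python 2's urllib2.HTTPError and Python 3's urllib.error.HTTPError share one import path here. A small sketch (not part of the PR; the URL is just an endpoint that returns 401 without credentials):

```python
from six.moves.urllib.error import HTTPError
from six.moves.urllib.request import urlopen

try:
    urlopen('https://www.googleapis.com/bigquery/v2/projects')
except HTTPError as err:
    print(err.code)  # same exception type on Python 2 and 3
```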
13 changes: 6 additions & 7 deletions bigquery/samples/load_data_by_post.py
@@ -14,10 +14,9 @@
 import json
 
 from bigquery.samples.utils import get_service, poll_job
-
 import httplib2
-
 from oauth2client.client import GoogleCredentials
+from six.moves import input
 
 
 # [START make_post]
@@ -75,16 +74,16 @@ def make_post(http, schema, data, projectId, datasetId, tableId):
 def main():
     credentials = GoogleCredentials.get_application_default()
     http = credentials.authorize(httplib2.Http())
-    projectId = raw_input('Enter the project ID: ')
-    datasetId = raw_input('Enter a dataset ID: ')
-    tableId = raw_input('Enter a table name to load the data to: ')
-    schema_path = raw_input(
+    projectId = input('Enter the project ID: ')
+    datasetId = input('Enter a dataset ID: ')
+    tableId = input('Enter a table name to load the data to: ')
+    schema_path = input(
         'Enter the path to the schema file for the table: ')
 
     with open(schema_path, 'r') as schema_file:
         schema = schema_file.read()
 
-    data_path = raw_input('Enter the path to the data file: ')
+    data_path = input('Enter the path to the data file: ')
 
     with open(data_path, 'r') as data_file:
         data = data_file.read()
15 changes: 8 additions & 7 deletions bigquery/samples/load_data_from_csv.py
@@ -15,6 +15,7 @@
 import uuid
 
 from bigquery.samples.utils import get_service, poll_job
+from six.moves import input
 
 
 # [START load_table]
@@ -81,20 +82,20 @@ def run(source_schema, source_csv,
 
 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a destination table name: ")
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a destination table name: ")
 
-    schema_file_path = raw_input(
+    schema_file_path = input(
         "Enter the path to the table schema: ")
     with open(schema_file_path, 'r') as schema_file:
         schema = json.load(schema_file)
 
-    data_file_path = raw_input(
+    data_file_path = input(
         "Enter the Cloud Storage path for the CSV file: ")
-    num_retries = raw_input(
+    num_retries = input(
         "Enter number of times to retry in case of 500 error: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")
     run(schema,
         data_file_path,
17 changes: 8 additions & 9 deletions bigquery/samples/streaming.py
@@ -11,13 +11,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function
-
 import ast
 import json
 import uuid
 
 from bigquery.samples.utils import get_service
+from six.moves import input
 
 
 # [START stream_row_to_bigquery]
@@ -57,18 +56,18 @@ def run(project_id, dataset_id, table_id, rows, num_retries):
 
 # [START main]
 def get_rows():
-    line = raw_input("Enter a row (python dict) into the table: ")
+    line = input("Enter a row (python dict) into the table: ")
     while line:
         yield ast.literal_eval(line)
-        line = raw_input("Enter another row into the table \n" +
-                         "[hit enter to stop]: ")
+        line = input("Enter another row into the table \n" +
+                     "[hit enter to stop]: ")
 
 
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    dataset_id = raw_input("Enter a dataset ID: ")
-    table_id = raw_input("Enter a table ID : ")
-    num_retries = int(raw_input(
+    project_id = input("Enter the project ID: ")
+    dataset_id = input("Enter a dataset ID: ")
+    table_id = input("Enter a table ID : ")
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))
 
     for result in run(project_id, dataset_id, table_id,
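get_rows() parses each typed line with ast.literal_eval, which accepts only Python literals, so a user-typed row cannot execute code the way Python 2's bare input() (or eval()) could. A minimal illustration (not part of the PR):

```python
import ast

row = ast.literal_eval("{'Name': 'Ada', 'Age': 36, 'IsMagic': False}")
print(row['Age'])  # -> 36

# Anything that is not a literal is rejected:
# ast.literal_eval("__import__('os')")  # raises ValueError
```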
11 changes: 5 additions & 6 deletions bigquery/samples/sync_query.py
@@ -11,11 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function  # For python 2/3 interoperability
-
 import json
 
 from bigquery.samples.utils import get_service, paging
+from six.moves import input
 
 
 # [START sync_query]
@@ -49,12 +48,12 @@ def run(project_id, query, timeout, num_retries):
 
 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    timeout = raw_input(
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    timeout = input(
         "Enter how long to wait for the query to complete in milliseconds"
         "\n (if longer than 10 seconds, use an asynchronous query): ")
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter how many times to retry in case of server error"))
 
     for result in run(project_id, query_string, timeout, num_retries):
4 changes: 3 additions & 1 deletion bigquery/tests/test_async_query.py
@@ -37,7 +37,9 @@ def test_async_query_runner(self):
         test_project_id = os.environ.get(tests.PROJECT_ID_ENV)
         answers = [test_project_id, self.constants['query'], 'n',
                    '1', '1']
-        with tests.mock_raw_input(answers):
+
+        with tests.mock_input_answers(
+                answers, target='bigquery.samples.async_query.input'):
             main()
 
 
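The diff does not show the tests.mock_input_answers helper itself; below is a hypothetical sketch of what it might look like (the real helper in the repository's test package may differ). Because each sample now binds input into its own namespace via `from six.moves import input`, the patch target must be that module-level name, hence target='bigquery.samples.async_query.input':

```python
# Hypothetical sketch; the actual helper may be implemented differently.
import contextlib

import mock  # the standalone mock package (unittest.mock on Python 3)


@contextlib.contextmanager
def mock_input_answers(answers, target):
    # Hand out one scripted answer per prompt.
    replies = iter(answers)
    with mock.patch(target, side_effect=lambda prompt='': next(replies)):
        yield
```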
9 changes: 7 additions & 2 deletions compute/autoscaler/demo/frontend.py
@@ -22,10 +22,15 @@
     autoscaler/demo/tests/test_frontend.py
 """
 
-import BaseHTTPServer
+try:
+    import BaseHTTPServer
+    import SocketServer
+except:
+    import http.server as BaseHTTPServer
+    import socketserver as SocketServer
+
 from multiprocessing import Process
 import os
-import SocketServer
 import sys
 import time
 
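For comparison, the same fallback could be written with six.moves, as the bigquery samples above do; the try/except form shown here works without adding a six dependency to the demo. A sketch of the alternative, not what the PR does:

```python
# six.moves resolves these to BaseHTTPServer/SocketServer on Python 2 and
# http.server/socketserver on Python 3.
from six.moves import BaseHTTPServer, socketserver
```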
15 changes: 8 additions & 7 deletions monitoring/samples/auth.py
@@ -35,7 +35,8 @@
 
     $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/json-key.json
 
-[1] https://developers.google.com/identity/protocols/application-default-credentials
+[1] https://developers.google.com/identity/protocols/\
+application-default-credentials
 [2] https://console.developers.google.com/project/_/apiui/credential
 """  # NOQA
 
@@ -62,19 +63,19 @@ def ListTimeseries(project_name, service):
 
     timeseries = service.timeseries()
 
-    print 'Timeseries.list raw response:'
+    print('Timeseries.list raw response:')
     try:
         response = timeseries.list(
             project=project_name, metric=METRIC, youngest=YOUNGEST).execute()
 
-        print json.dumps(response,
+        print(json.dumps(response,
                          sort_keys=True,
                          indent=4,
-                         separators=(',', ': '))
+                         separators=(',', ': ')))
     except:
-        print 'Error:'
+        print('Error:')
         for error in sys.exc_info():
-            print error
+            print(error)
 
 
 def main(project_name):
@@ -87,7 +88,7 @@
 
 if __name__ == '__main__':
     if len(sys.argv) != 2:
-        print "Usage: %s <project-name>" % sys.argv[0]
+        print("Usage: {} <project-name>".format(sys.argv[0]))
         sys.exit(1)
     main(sys.argv[1])
 # [END all]
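A note on why these conversions need no `from __future__ import print_function` (and why the bigquery samples above could even drop that import): a single-argument print(...) is valid syntax on both interpreters. A minimal illustration, not part of the PR:

```python
# On Python 2 this parses as the print statement applied to a
# parenthesized expression; on Python 3 it is a function call. Either way,
# one argument prints the same text.
print('works on Python 2 and 3')

# Multiple arguments are the trap: without the __future__ import,
# Python 2 prints the tuple ('a', 'b') rather than "a b".
# print('a', 'b')
```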
12 changes: 6 additions & 6 deletions storage/compose_objects.py
@@ -73,8 +73,8 @@ def main(argv):
             name=filename,
             bucket=args.bucket)
         resp = req.execute()
-        print '> Uploaded source file %s' % filename
-        print json.dumps(resp, indent=2)
+        print('> Uploaded source file {}'.format(filename))
+        print(json.dumps(resp, indent=2))
 
     # Construct a request to compose the source files into the destination.
     compose_req_body = {
@@ -88,17 +88,17 @@
         destinationObject=args.destination,
         body=compose_req_body)
     resp = req.execute()
-    print '> Composed files into %s' % args.destination
-    print json.dumps(resp, indent=2)
+    print('> Composed files into {}'.format(args.destination))
+    print(json.dumps(resp, indent=2))
 
     # Download and print the composed object.
     req = service.objects().get_media(
         bucket=args.bucket,
         object=args.destination)
 
     res = req.execute()
-    print '> Composed file contents:'
-    print res
+    print('> Composed file contents:')
+    print(res)
 
 
 if __name__ == '__main__':
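Both %-interpolation and str.format run on Python 2 and 3, so the switch in this file is stylistic, converging the samples on one idiom alongside the print-function change. For comparison (not part of the PR):

```python
destination = 'composed.txt'
print('> Composed files into %s' % destination)        # printf-style
print('> Composed files into {}'.format(destination))  # str.format
```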