Auto-update dependencies. [(googleapis#1004)](GoogleCloudPlatform/python-docs-samples#1004)

* Auto-update dependencies.

* Fix natural language samples

* Fix pubsub iam samples

* Fix language samples

* Fix bigquery samples
dpebot authored and Jon Wayne Parrott committed Jun 27, 2017
1 parent 1f0ce24 commit 21c0802
Showing 8 changed files with 20 additions and 67 deletions.
17 changes: 3 additions & 14 deletions samples/snippets/async_query.py
@@ -48,20 +48,9 @@ def async_query(query):

    wait_for_job(query_job)

-    # Drain the query results by requesting a page at a time.
-    query_results = query_job.results()
-    page_token = None
-
-    while True:
-        rows, total_rows, page_token = query_results.fetch_data(
-            max_results=10,
-            page_token=page_token)
-
-        for row in rows:
-            print(row)
-
-        if not page_token:
-            break
+    rows = query_job.results().fetch_data(max_results=10)
+    for row in rows:
+        print(row)


if __name__ == '__main__':
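This is the pattern repeated in every file below: the page_token bookkeeping is dropped because, with google-cloud-bigquery 0.25.0 (pinned in requirements.txt further down), fetch_data() returns an iterable of result rows. For context, here is a hedged, self-contained sketch of the asynchronous flow this sample now uses; the import block, the inline polling loop (standing in for the sample's wait_for_job() helper), and the example query are assumptions added for illustration, not part of the diff:

import time
import uuid

from google.cloud import bigquery

client = bigquery.Client()
query_job = client.run_async_query(
    str(uuid.uuid4()),
    'SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus')
query_job.use_legacy_sql = False
query_job.begin()

# Poll until the job finishes, as the sample's wait_for_job() helper does.
while True:
    query_job.reload()
    if query_job.state == 'DONE':
        if query_job.error_result:
            raise RuntimeError(query_job.errors)
        break
    time.sleep(1)

# fetch_data() yields rows directly; no page_token loop is needed.
rows = query_job.results().fetch_data(max_results=10)
for row in rows:
    print(row)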
17 changes: 4 additions & 13 deletions samples/snippets/query_params.py
@@ -43,19 +43,10 @@ def wait_for_job(job):


def print_results(query_results):
-    """Print the query results by requesting a page at a time."""
-    page_token = None
-
-    while True:
-        rows, total_rows, page_token = query_results.fetch_data(
-            max_results=10,
-            page_token=page_token)
-
-        for row in rows:
-            print(row)
-
-        if not page_token:
-            break
+    """Print the rows in the query's results."""
+    rows = query_results.fetch_data(max_results=10)
+    for row in rows:
+        print(row)


def query_positional_params(corpus, min_word_count):
4 changes: 2 additions & 2 deletions samples/snippets/query_params_test.py
@@ -28,15 +28,15 @@ def test_query_named_params(capsys):
        corpus='romeoandjuliet',
        min_word_count=100)
    out, _ = capsys.readouterr()
-    assert 'love' in out
+    assert 'the' in out


def test_query_positional_params(capsys):
    query_params.query_positional_params(
        corpus='romeoandjuliet',
        min_word_count=100)
    out, _ = capsys.readouterr()
-    assert 'love' in out
+    assert 'the' in out


def test_query_struct_params(capsys):
2 changes: 1 addition & 1 deletion samples/snippets/requirements.txt
@@ -1,3 +1,3 @@
-google-cloud-bigquery==0.24.0
+google-cloud-bigquery==0.25.0
google-auth-oauthlib==0.1.0
pytz==2017.2
2 changes: 1 addition & 1 deletion samples/snippets/resources/data.csv
@@ -1 +1 @@
-Gandalf, 2000, 140.0, 1
+Gandalf,2000,140.0,1
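The only change to data.csv is dropping the spaces after each comma. CSV parsing keeps leading whitespace as part of the field, so the padded row would load as ' 2000', ' 140.0', and ' 1' rather than clean numeric strings. A quick illustration with Python's csv module (this snippet is illustrative and not part of the samples):

import csv
import io

# Fields keep their leading spaces unless skipinitialspace=True is set.
old = next(csv.reader(io.StringIO('Gandalf, 2000, 140.0, 1')))
new = next(csv.reader(io.StringIO('Gandalf,2000,140.0,1')))
print(old)  # ['Gandalf', ' 2000', ' 140.0', ' 1']
print(new)  # ['Gandalf', '2000', '140.0', '1']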
15 changes: 3 additions & 12 deletions samples/snippets/simple_app.py
@@ -38,19 +38,10 @@ def query_shakespeare():
    # [END run_query]

    # [START print_results]
-    # Drain the query results by requesting a page at a time.
-    page_token = None
+    rows = query_results.fetch_data(max_results=10)

-    while True:
-        rows, total_rows, page_token = query_results.fetch_data(
-            max_results=10,
-            page_token=page_token)
-
-        for row in rows:
-            print(row)
-
-        if not page_token:
-            break
+    for row in rows:
+        print(row)
    # [END print_results]


15 changes: 3 additions & 12 deletions samples/snippets/sync_query.py
@@ -39,19 +39,10 @@ def sync_query(query):

    query_results.run()

-    # Drain the query results by requesting a page at a time.
-    page_token = None
+    rows = query_results.fetch_data(max_results=10)

-    while True:
-        rows, total_rows, page_token = query_results.fetch_data(
-            max_results=10,
-            page_token=page_token)
-
-        for row in rows:
-            print(row)
-
-        if not page_token:
-            break
+    for row in rows:
+        print(row)
    # [END sync_query]


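After this change the synchronous sample reduces to a handful of lines. Below is a hedged sketch of the full flow, modelled on sync_query.py under google-cloud-bigquery 0.25.0; the import, client construction, and example query are assumptions added for context rather than part of the diff:

from google.cloud import bigquery

client = bigquery.Client()
query_results = client.run_sync_query(
    'SELECT corpus FROM `bigquery-public-data.samples.shakespeare` '
    'GROUP BY corpus')
query_results.use_legacy_sql = False
query_results.run()

# As above, fetch_data() returns an iterable of rows, so no page_token loop is needed.
rows = query_results.fetch_data(max_results=10)
for row in rows:
    print(row)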
15 changes: 3 additions & 12 deletions samples/snippets/user_credentials.py
@@ -46,20 +46,11 @@ def run_query(credentials, project, query):

    wait_for_job(query_job)

-    # Drain the query results by requesting a page at a time.
    query_results = query_job.results()
-    page_token = None
+    rows = query_results.fetch_data(max_results=10)

-    while True:
-        rows, total_rows, page_token = query_results.fetch_data(
-            max_results=10,
-            page_token=page_token)
-
-        for row in rows:
-            print(row)
-
-        if not page_token:
-            break
+    for row in rows:
+        print(row)


def authenticate_and_query(project, query, launch_browser=True):
