
Commit 2719ca7
Changed the retry order for test_big_query_write_temp_table_append_schema_update (#31407)

* changed the retry order

* updated shape

* fixed save_model

* added load_model_args
liferoad authored May 27, 2024
1 parent 5c30b1d commit 2719ca7
Showing 2 changed files with 10 additions and 6 deletions.
2 changes: 1 addition & 1 deletion sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
@@ -506,14 +506,14 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
           equal_to(bq_result_errors))
 
   @pytest.mark.it_postcommit
-  @retry(reraise=True, stop=stop_after_attempt(3))
   @parameterized.expand([
       param(file_format=FileFormat.AVRO),
       param(file_format=FileFormat.JSON),
       param(file_format=None),
   ])
   @mock.patch(
       "apache_beam.io.gcp.bigquery_file_loads._MAXIMUM_SOURCE_URIS", new=1)
+  @retry(reraise=True, stop=stop_after_attempt(3))
   def test_big_query_write_temp_table_append_schema_update(self, file_format):
     """
     Test that nested schema update options and schema relaxation
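
Why moving the decorator fixes the retries: Python applies stacked decorators bottom-up, so the decorator written closest to `def` wraps the raw test body. With `@retry` at the bottom of the stack, each case generated by `@parameterized.expand` retries its already-patched body; in the old order, `@retry` wrapped the object `@parameterized.expand` had already produced, so the generated cases were never retried. A toy sketch of the ordering (illustrative only, not Beam code):

# Toy sketch (not Beam code): stacked decorators apply bottom-up, so the one
# nearest `def` becomes the innermost wrapper and runs closest to the body.
def label(tag):
  def deco(fn):
    def wrapper(*args, **kwargs):
      print('enter', tag)
      return fn(*args, **kwargs)
    return wrapper
  return deco

@label('outer')  # plays the role of @parameterized.expand / @mock.patch
@label('inner')  # plays the role of @retry in the new order
def test_case():
  print('body')

test_case()  # prints: enter outer, enter inner, body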
14 changes: 9 additions & 5 deletions sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
@@ -65,7 +65,7 @@ def predict(self, input: tf.Tensor, add=False):
 
 
 def _create_mult2_model():
-  inputs = tf.keras.Input(shape=(3))
+  inputs = tf.keras.Input(shape=(3, ))
   outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
   return tf.keras.Model(inputs=inputs, outputs=outputs)
 
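The shape fix is a Python subtlety rather than a Keras one: `(3)` is simply the integer 3 in parentheses, while `(3, )` is the one-element tuple that `tf.keras.Input` expects. A two-line check:

# The trailing comma, not the parentheses, is what makes a one-element tuple.
assert (3) == 3 and not isinstance((3), tuple)
assert isinstance((3, ), tuple) and len((3, )) == 1
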
@@ -127,7 +127,7 @@ def test_predict_tensor(self):
 
   def test_predict_tensor_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
     with TestPipeline() as pipeline:
 
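The new '.keras' suffix matters because `tf.keras.models.save_model` picks the serialization format from the file extension, and newer Keras releases reject extension-less paths that older tf.keras silently wrote as SavedModel directories. A minimal sketch mirroring the test's model (the path is illustrative, and the rejection behavior assumes a Keras 3 runtime):

import tensorflow as tf

inputs = tf.keras.Input(shape=(3, ))
outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

# '.keras' selects the native single-file format; the bare 'mult2' path the
# old test used is what newer Keras refuses to save.
tf.keras.models.save_model(model, '/tmp/mult2.keras')
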
@@ -146,6 +146,7 @@ def fake_batching_inference_fn(
       model_handler = TFModelHandlerTensor(
           model_uri=model_path,
           inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
           min_batch_size=2,
           max_batch_size=2)
       examples = [
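
`load_model_args` is passed through to the model-loading call, and `'safe_mode': False` is needed because the test model contains a `Lambda` layer: Keras 3's `load_model` refuses to deserialize arbitrary Python lambdas while safe mode is on. Roughly the load the handler performs with these args (path illustrative; the pass-through matches how `load_model_args` is documented for the TF model handlers):

import tensorflow as tf

# safe_mode=False permits deserializing the Lambda layer's embedded Python code.
model = tf.keras.models.load_model('/tmp/mult2.keras', safe_mode=False)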
@@ -172,7 +173,7 @@ def fake_batching_inference_fn(
 
   def test_predict_tensor_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
     with TestPipeline() as pipeline:
 
@@ -193,6 +194,7 @@ def fake_batching_inference_fn(
       model_handler = TFModelHandlerTensor(
           model_uri=model_path,
           inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
           large_model=True)
       examples = [
           tf.convert_to_tensor(numpy.array([1.1, 2.2, 3.3], dtype='float32')),
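
`large_model=True` is untouched by this commit but explains why the same loading fixes recur here: per Beam's large-model support, it asks RunInference to load a single shared copy of the model on each worker rather than one per process or thread, and that shared load still goes through `load_model_args`. A hedged sketch of the handler construction (path illustrative):

from apache_beam.ml.inference.tensorflow_inference import TFModelHandlerTensor

model_handler = TFModelHandlerTensor(
    model_uri='/tmp/mult2.keras',
    load_model_args={'safe_mode': False},  # Lambda layer needs safe mode off
    large_model=True)  # share one loaded copy across the worker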
@@ -218,7 +220,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
     with TestPipeline() as pipeline:
 
@@ -237,6 +239,7 @@ def fake_batching_inference_fn(
       model_handler = TFModelHandlerNumpy(
           model_uri=model_path,
           inference_fn=fake_batching_inference_fn,
+          load_model_args={'safe_mode': False},
           min_batch_size=2,
           max_batch_size=2)
       examples = [
@@ -260,7 +263,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
     with TestPipeline() as pipeline:
 
@@ -280,6 +283,7 @@ def fake_inference_fn(
 
       model_handler = TFModelHandlerNumpy(
           model_uri=model_path,
+          load_model_args={'safe_mode': False},
           inference_fn=fake_inference_fn,
           large_model=True)
       examples = [
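
Taken together, the fixes line up with ordinary RunInference usage. A self-contained sketch of that pattern (paths and values illustrative, assuming `load_model_args` is forwarded to the loader as in these tests):

import apache_beam as beam
import numpy
import tensorflow as tf
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.tensorflow_inference import TFModelHandlerNumpy

# Save a multiply-by-two model the same way the tests do.
inputs = tf.keras.Input(shape=(3, ))
outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
tf.keras.models.save_model(
    tf.keras.Model(inputs=inputs, outputs=outputs), '/tmp/mult2_numpy.keras')

model_handler = TFModelHandlerNumpy(
    model_uri='/tmp/mult2_numpy.keras',
    load_model_args={'safe_mode': False})

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create([numpy.array([1.0, 2.0, 3.0], dtype='float32')])
      | RunInference(model_handler)
      | beam.Map(print))  # each PredictionResult carries inference [2. 4. 6.]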
