From 5a971a18f7d9e8e4443b707e4fb1d5a0de887ec1 Mon Sep 17 00:00:00 2001
From: liferoad
Date: Sat, 25 May 2024 15:13:26 -0400
Subject: [PATCH 1/4] changed the retry order

---
 sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
index 4c8e8ebf489a..968aed85463f 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
@@ -506,12 +506,12 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
           equal_to(bq_result_errors))
 
   @pytest.mark.it_postcommit
-  @retry(reraise=True, stop=stop_after_attempt(3))
   @parameterized.expand([
       param(file_format=FileFormat.AVRO),
       param(file_format=FileFormat.JSON),
       param(file_format=None),
   ])
+  @retry(reraise=True, stop=stop_after_attempt(3))
   @mock.patch(
       "apache_beam.io.gcp.bigquery_file_loads._MAXIMUM_SOURCE_URIS", new=1)
   def test_big_query_write_temp_table_append_schema_update(self, file_format):

From 5598695e9e7f59575a2f10a1328f6540bf1f01fa Mon Sep 17 00:00:00 2001
From: liferoad
Date: Sat, 25 May 2024 16:03:20 -0400
Subject: [PATCH 2/4] updated shape

---
 sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py  | 2 +-
 .../apache_beam/ml/inference/tensorflow_inference_test.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
index 968aed85463f..b0140793cf79 100644
--- a/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
+++ b/sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
@@ -511,9 +511,9 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
       param(file_format=FileFormat.JSON),
       param(file_format=None),
   ])
-  @retry(reraise=True, stop=stop_after_attempt(3))
   @mock.patch(
       "apache_beam.io.gcp.bigquery_file_loads._MAXIMUM_SOURCE_URIS", new=1)
+  @retry(reraise=True, stop=stop_after_attempt(3))
   def test_big_query_write_temp_table_append_schema_update(self, file_format):
     """ Test that nested schema update options and schema relaxation
diff --git a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
index 52c525cc0eaf..7bdac95e004c 100644
--- a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
@@ -65,7 +65,7 @@ def predict(self, input: tf.Tensor, add=False):
 
 
 def _create_mult2_model():
-  inputs = tf.keras.Input(shape=(3))
+  inputs = tf.keras.Input(shape=(3, ))
   outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
   return tf.keras.Model(inputs=inputs, outputs=outputs)
 
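A note on the two reorderings above: Python applies stacked decorators bottom-up, so the decorator written closest to the `def` wraps the test body first. With `@retry` listed above `@parameterized.expand`, the retry wrapped the expansion stub rather than the generated test cases; after these two commits it sits directly above the test function, innermost, so each generated, mock-patched case retries as a unit. Below is a minimal, self-contained sketch with stand-in decorators (toy names and simplified behavior, not the Beam, tenacity, or parameterized implementations):

import functools

def expand(params):
  # Stand-in for parameterized.expand: kept outermost, so every generated
  # case runs the full wrapper stack written below it.
  def deco(fn):
    def run_all():
      for p in params:
        fn(p)
    return run_all
  return deco

def retry(attempts):
  # Stand-in for tenacity's retry(reraise=True, stop=stop_after_attempt(n)).
  def deco(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
      for i in range(attempts):
        try:
          return fn(*args, **kwargs)
        except Exception:
          if i == attempts - 1:
            raise  # reraise=True: surface the last failure
    return wrapper
  return deco

attempts_seen = {}

@expand([1, 2, 3])
@retry(3)  # innermost: each parameterized case retries independently
def flaky_case(p):
  attempts_seen[p] = attempts_seen.get(p, 0) + 1
  if attempts_seen[p] < 2:
    raise RuntimeError('transient failure')

flaky_case()  # each case fails once, then succeeds on its second attempt
assert attempts_seen == {1: 2, 2: 2, 3: 2}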
From ef353136c8f876bf4ddae08ea6bd9dc464766548 Mon Sep 17 00:00:00 2001
From: liferoad
Date: Sat, 25 May 2024 17:58:22 -0400
Subject: [PATCH 3/4] fixed save_model

---
 .../apache_beam/ml/inference/tensorflow_inference_test.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
index 7bdac95e004c..b648a5596ec3 100644
--- a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
@@ -127,7 +127,7 @@ def test_predict_tensor(self):
 
   def test_predict_tensor_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -172,7 +172,7 @@ def fake_batching_inference_fn(
 
   def test_predict_tensor_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2')
+    model_path = os.path.join(self.tmpdir, 'mult2.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -218,7 +218,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_batch_size(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:
@@ -260,7 +260,7 @@ def fake_batching_inference_fn(
 
   def test_predict_numpy_with_large_model(self):
     model = _create_mult2_model()
-    model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+    model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
     tf.keras.models.save_model(model, model_path)
 
     with TestPipeline() as pipeline:

From 113e3a07dc711a8a9cafd3f86de480b2758dac41 Mon Sep 17 00:00:00 2001
From: liferoad
Date: Sat, 25 May 2024 21:58:09 -0400
Subject: [PATCH 4/4] added load_model_args

---
 .../apache_beam/ml/inference/tensorflow_inference_test.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
index b648a5596ec3..52123516de1a 100644
--- a/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
+++ b/sdks/python/apache_beam/ml/inference/tensorflow_inference_test.py
@@ -146,6 +146,7 @@ def fake_batching_inference_fn(
     model_handler = TFModelHandlerTensor(
         model_uri=model_path,
         inference_fn=fake_batching_inference_fn,
+        load_model_args={'safe_mode': False},
         min_batch_size=2,
         max_batch_size=2)
     examples = [
@@ -193,6 +194,7 @@ def fake_batching_inference_fn(
     model_handler = TFModelHandlerTensor(
         model_uri=model_path,
         inference_fn=fake_batching_inference_fn,
+        load_model_args={'safe_mode': False},
         large_model=True)
     examples = [
         tf.convert_to_tensor(numpy.array([1.1, 2.2, 3.3], dtype='float32')),
@@ -237,6 +239,7 @@ def fake_batching_inference_fn(
     model_handler = TFModelHandlerNumpy(
         model_uri=model_path,
         inference_fn=fake_batching_inference_fn,
+        load_model_args={'safe_mode': False},
         min_batch_size=2,
         max_batch_size=2)
     examples = [
@@ -280,6 +283,7 @@ def fake_inference_fn(
     model_handler = TFModelHandlerNumpy(
         model_uri=model_path,
+        load_model_args={'safe_mode': False},
         inference_fn=fake_inference_fn,
         large_model=True)
     examples = [
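For context on patches 3 and 4: under Keras 3 (bundled with newer TensorFlow releases), save_model requires an explicit .keras (or .h5) extension, and a saved Lambda layer, which the mult2 test model uses, only deserializes when load_model is called with safe_mode=False, since that re-hydrates arbitrary Python code; load_model_args is the hook the TF model handlers expose for forwarding such keyword arguments to the loader. A minimal sketch of the failure mode and fix, assuming TensorFlow with Keras 3 and using an illustrative /tmp path:

import tensorflow as tf

# Same toy model as the tests: shape must be a tuple, since (3) is just the
# int 3, hence the (3, ) fix in patch 2.
inputs = tf.keras.Input(shape=(3, ))
outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

# Keras 3 rejects extensionless save paths, hence the '.keras' suffixes
# added in patch 3.
tf.keras.models.save_model(model, '/tmp/mult2.keras')

# The default safe_mode=True refuses to deserialize the Lambda layer's
# Python lambda, hence load_model_args={'safe_mode': False} in patch 4.
loaded = tf.keras.models.load_model('/tmp/mult2.keras', safe_mode=False)
print(loaded.predict(tf.constant([[1.0, 2.0, 3.0]])))  # [[2. 4. 6.]]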