Changed the retry order for test_big_query_write_temp_table_append_schema_update #31407

Merged · 4 commits · May 27, 2024
2 changes: 1 addition & 1 deletion sdks/python/apache_beam/io/gcp/bigquery_write_it_test.py
@@ -506,14 +506,14 @@ def test_big_query_write_insert_non_transient_api_call_error(self):
equal_to(bq_result_errors))

@pytest.mark.it_postcommit
- @retry(reraise=True, stop=stop_after_attempt(3))
@parameterized.expand([
param(file_format=FileFormat.AVRO),
param(file_format=FileFormat.JSON),
param(file_format=None),
])
@mock.patch(
"apache_beam.io.gcp.bigquery_file_loads._MAXIMUM_SOURCE_URIS", new=1)
+ @retry(reraise=True, stop=stop_after_attempt(3))
def test_big_query_write_temp_table_append_schema_update(self, file_format):
"""
Test that nested schema update options and schema relaxation
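The fix itself is a one-line move. Python applies decorators bottom-up, so the decorator closest to the def wraps the test body itself; @parameterized.expand generates one test method per param at class-definition time, so a @retry sitting above it wrapped the expansion step instead of the generated tests. Placed directly above the def, each expanded case now gets its own three attempts. A minimal sketch of that behavior with a hypothetical test class (assumes tenacity and parameterized behave as in the Beam suite):

import unittest

from parameterized import param, parameterized
from tenacity import retry, stop_after_attempt


class DecoratorOrderTest(unittest.TestCase):
  attempts = {}  # class-level dict, shared across instances; counts calls per case

  @parameterized.expand([param(fmt='AVRO'), param(fmt='JSON')])
  @retry(reraise=True, stop=stop_after_attempt(3))  # innermost: retries each expanded case
  def test_flaky(self, fmt):
    n = DecoratorOrderTest.attempts.get(fmt, 0) + 1
    DecoratorOrderTest.attempts[fmt] = n
    if n < 3:  # fails twice, passes on the third attempt of each case
      raise RuntimeError('transient failure for %s' % fmt)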
Second file in the diff: the TensorFlow model handler tests
@@ -65,7 +65,7 @@ def predict(self, input: tf.Tensor, add=False):


def _create_mult2_model():
- inputs = tf.keras.Input(shape=(3))
+ inputs = tf.keras.Input(shape=(3, ))
outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)

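The shape change is a Python subtlety rather than a TensorFlow one: (3) is just the integer 3 in parentheses, while (3, ) is a one-element tuple, which is what Keras expects for shape (older releases tolerated a bare int; the assumption here is that newer ones are stricter). A quick illustration:

import tensorflow as tf

assert (3) == 3       # parentheses alone do not make a tuple
assert (3, ) == (3,)  # the trailing comma does

inputs = tf.keras.Input(shape=(3, ))  # batches of 3-element vectors
print(inputs.shape)  # (None, 3)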
@@ -127,7 +127,7 @@ def test_predict_tensor(self):

def test_predict_tensor_with_batch_size(self):
model = _create_mult2_model()
- model_path = os.path.join(self.tmpdir, 'mult2')
+ model_path = os.path.join(self.tmpdir, 'mult2.keras')
tf.keras.models.save_model(model, model_path)
with TestPipeline() as pipeline:

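The .keras suffix matters because tf.keras.models.save_model infers the save format from the file extension; under Keras 3 a bare path like 'mult2' no longer falls back to a SavedModel directory and raises instead (an assumption about the Keras version these tests now run against). A round-trip sketch:

import os
import tempfile

import tensorflow as tf

model = tf.keras.Sequential(
    [tf.keras.Input(shape=(3, )), tf.keras.layers.Dense(1)])
path = os.path.join(tempfile.mkdtemp(), 'model.keras')  # extension selects the native format
tf.keras.models.save_model(model, path)
reloaded = tf.keras.models.load_model(path)
print(reloaded(tf.constant([[1.0, 2.0, 3.0]])))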
@@ -146,6 +146,7 @@ def fake_batching_inference_fn(
model_handler = TFModelHandlerTensor(
model_uri=model_path,
inference_fn=fake_batching_inference_fn,
+ load_model_args={'safe_mode': False},
min_batch_size=2,
max_batch_size=2)
examples = [
@@ -172,7 +173,7 @@ def fake_batching_inference_fn(

def test_predict_tensor_with_large_model(self):
model = _create_mult2_model()
- model_path = os.path.join(self.tmpdir, 'mult2')
+ model_path = os.path.join(self.tmpdir, 'mult2.keras')
tf.keras.models.save_model(model, model_path)
with TestPipeline() as pipeline:

@@ -193,6 +194,7 @@ def fake_batching_inference_fn(
model_handler = TFModelHandlerTensor(
model_uri=model_path,
inference_fn=fake_batching_inference_fn,
+ load_model_args={'safe_mode': False},
large_model=True)
examples = [
tf.convert_to_tensor(numpy.array([1.1, 2.2, 3.3], dtype='float32')),
@@ -218,7 +220,7 @@ def fake_batching_inference_fn(

def test_predict_numpy_with_batch_size(self):
model = _create_mult2_model()
- model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+ model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
tf.keras.models.save_model(model, model_path)
with TestPipeline() as pipeline:

@@ -237,6 +239,7 @@ def fake_batching_inference_fn(
model_handler = TFModelHandlerNumpy(
model_uri=model_path,
inference_fn=fake_batching_inference_fn,
+ load_model_args={'safe_mode': False},
min_batch_size=2,
max_batch_size=2)
examples = [
@@ -260,7 +263,7 @@ def fake_batching_inference_fn(

def test_predict_numpy_with_large_model(self):
model = _create_mult2_model()
- model_path = os.path.join(self.tmpdir, 'mult2_numpy')
+ model_path = os.path.join(self.tmpdir, 'mult2_numpy.keras')
tf.keras.models.save_model(model, model_path)
with TestPipeline() as pipeline:

@@ -280,6 +283,7 @@ def fake_inference_fn(

model_handler = TFModelHandlerNumpy(
model_uri=model_path,
+ load_model_args={'safe_mode': False},
inference_fn=fake_inference_fn,
large_model=True)
examples = [
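The recurring load_model_args={'safe_mode': False} addition ties back to the Lambda layer in _create_mult2_model: Keras 3 refuses to deserialize Lambda layers under its default safe_mode=True, since they execute arbitrary Python. The handler's load_model_args dict is presumably forwarded to tf.keras.models.load_model, so the flag reaches the loader. A standalone sketch of the failure and the opt-out, assuming Keras 3 semantics:

import os
import tempfile

import tensorflow as tf

inputs = tf.keras.Input(shape=(3, ))
outputs = tf.keras.layers.Lambda(lambda x: x * 2, dtype='float32')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

path = os.path.join(tempfile.mkdtemp(), 'mult2.keras')
model.save(path)

# The default safe_mode=True raises on the Lambda layer; opting out
# restores the pre-Keras-3 behavior for this trusted test model.
reloaded = tf.keras.models.load_model(path, safe_mode=False)
print(reloaded(tf.constant([[1.0, 2.0, 3.0]])))  # [[2. 4. 6.]]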