[Bug]: Java SDK blocked due to FnApi data stream multiplexing and ProcessBundleHandler exception handling #2212

GitHub Actions / Test Results failed Nov 12, 2024 in 0s

1 fail, 21 skipped, 18 pass in 8m 24s

40 tests in 1 suite (1 file): 18 ✅ passed, 21 💤 skipped, 1 ❌ failed in 8m 24s ⏱️

Results for commit 785ec07.

Annotations

Check warning on line 0 in apache_beam.examples.complete.distribopt_test.DistribOptimizationTest

github-actions / Test Results

test_basics (apache_beam.examples.complete.distribopt_test.DistribOptimizationTest) failed

sdks/python/pytest_postCommitExamples-flink-py312.xml [took 19s]
Raw output
RuntimeError: Pipeline BeamApp-runner-1112111538-24624f67_bc387aa3-f99b-4156-917f-8e09ef78f3b3 failed in state FAILED: java.io.IOException: Insufficient number of network buffers: required 16, but only 9 available. The total number of network buffers is currently set to 2048 of 32768 bytes each. You can increase this number by setting the configuration keys 'taskmanager.memory.network.fraction', 'taskmanager.memory.network.min', and 'taskmanager.memory.network.max'.
self = <apache_beam.examples.complete.distribopt_test.DistribOptimizationTest testMethod=test_basics>

    @pytest.mark.sickbay_dataflow
    @pytest.mark.examples_postcommit
    def test_basics(self):
      test_pipeline = TestPipeline(is_integration_test=True)
    
      # Setup the files with expected content.
      temp_location = test_pipeline.get_option('temp_location')
      input = '/'.join([temp_location, str(uuid.uuid4()), 'input.txt'])
      output = '/'.join([temp_location, str(uuid.uuid4()), 'result'])
      create_file(input, FILE_CONTENTS)
      extra_opts = {'input': input, 'output': output}
    
      # Run pipeline
      # Avoid dependency on SciPy
      scipy_mock = MagicMock()
      result_mock = MagicMock(x=np.ones(3))
      scipy_mock.optimize.minimize = MagicMock(return_value=result_mock)
      modules = {'scipy': scipy_mock, 'scipy.optimize': scipy_mock.optimize}
    
      with patch.dict('sys.modules', modules):
        from apache_beam.examples.complete import distribopt
>       distribopt.run(
            test_pipeline.get_full_options_as_args(**extra_opts),
            save_main_session=False)

apache_beam/examples/complete/distribopt_test.py:70: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
apache_beam/examples/complete/distribopt.py:316: in run
    with beam.Pipeline(options=pipeline_options) as p:
apache_beam/pipeline.py:622: in __exit__
    self.result.wait_until_finish()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <apache_beam.runners.portability.portable_runner.PipelineResult object at 0x7de1743ee780>
duration = None

    def wait_until_finish(self, duration=None):
      """
      :param duration: The maximum time in milliseconds to wait for the result of
      the execution. If None or zero, will wait until the pipeline finishes.
      :return: The result of the pipeline, i.e. PipelineResult.
      """
      def read_messages() -> None:
        previous_state = -1
        for message in self._message_stream:
          if message.HasField('message_response'):
            logging.log(
                MESSAGE_LOG_LEVELS[message.message_response.importance],
                "%s",
                message.message_response.message_text)
          else:
            current_state = message.state_response.state
            if current_state != previous_state:
              _LOGGER.info(
                  "Job state changed to %s",
                  self.runner_api_state_to_pipeline_state(current_state))
              previous_state = current_state
          self._messages.append(message)
    
      message_thread = threading.Thread(
          target=read_messages, name='wait_until_finish_read')
      message_thread.daemon = True
      message_thread.start()
    
      if duration:
        state_thread = threading.Thread(
            target=functools.partial(self._observe_state, message_thread),
            name='wait_until_finish_state_observer')
        state_thread.daemon = True
        state_thread.start()
        start_time = time.time()
        duration_secs = duration / 1000
        while (time.time() - start_time < duration_secs and
               state_thread.is_alive()):
          time.sleep(1)
      else:
        self._observe_state(message_thread)
    
      if self._runtime_exception:
>       raise self._runtime_exception
E       RuntimeError: Pipeline BeamApp-runner-1112111538-24624f67_bc387aa3-f99b-4156-917f-8e09ef78f3b3 failed in state FAILED: java.io.IOException: Insufficient number of network buffers: required 16, but only 9 available. The total number of network buffers is currently set to 2048 of 32768 bytes each. You can increase this number by setting the configuration keys 'taskmanager.memory.network.fraction', 'taskmanager.memory.network.min', and 'taskmanager.memory.network.max'.

apache_beam/runners/portability/portable_runner.py:568: RuntimeError
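The underlying Flink failure is a shortage of network buffer segments: 2048 buffers of 32768 bytes each give the TaskManager only 64 MiB of network memory, while the shuffle for this portable pipeline needs more free segments than that leaves available. As a hedged sketch (the values below are illustrative, not the settings used by this CI job), the keys named in the exception can be raised in flink-conf.yaml:

    # flink-conf.yaml (illustrative values; tune to the workload and available memory)
    taskmanager.memory.network.fraction: 0.2   # share of Flink memory reserved for network buffers (default 0.1)
    taskmanager.memory.network.min: 128mb      # lower bound for network memory
    taskmanager.memory.network.max: 1gb        # upper bound for network memory

Lowering the job's parallelism also reduces the number of buffers the shuffle requires, which can avoid this error without changing the TaskManager configuration.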

Check notice on line 0 in .github

github-actions / Test Results

21 skipped tests found

There are 21 skipped tests, see "Raw output" for the full list of skipped tests.
Raw output
apache_beam.examples.complete.juliaset.juliaset.juliaset_test.JuliaSetTest ‑ test_generate_fractal_image
apache_beam.examples.ml_transform.ml_transform_it_test
apache_beam.examples.snippets.transforms.elementwise.mltransform_test
apache_beam.examples.snippets.transforms.elementwise.runinference_test
apache_beam.ml.inference.huggingface_inference_it_test
apache_beam.ml.inference.huggingface_inference_test
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_test
apache_beam.ml.inference.tensorflow_inference_test
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.vertex_ai_inference_it_test
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.transforms.handlers_test
apache_beam.ml.transforms.tft_test
apache_beam.runners.dask.dask_runner_test
apache_beam.testing.analyzers.perf_analysis_test
apache_beam.testing.benchmarks.cloudml.cloudml_benchmark_test
apache_beam.transforms.enrichment_handlers.feast_feature_store_it_test
apache_beam.transforms.enrichment_handlers.feast_feature_store_test
apache_beam.typehints.pytorch_type_compatibility_test
apache_beam.yaml.yaml_ml_test

Check notice on line 0 in .github

github-actions / Test Results

40 tests found

There are 40 tests, see "Raw output" for the full list of tests.
Raw output
apache_beam.examples.avro_nyc_trips_it_test.AvroNycTripsIT ‑ test_avro_nyc_trips_output_files_on_small_input
apache_beam.examples.complete.autocomplete_it_test.AutocompleteIT ‑ test_autocomplete_output_files_on_small_input
apache_beam.examples.complete.distribopt_test.DistribOptimizationTest ‑ test_basics
apache_beam.examples.complete.estimate_pi_it_test.EstimatePiIT ‑ test_estimate_pi_output_file
apache_beam.examples.complete.game.hourly_team_score_it_test.HourlyTeamScoreIT ‑ test_hourly_team_score_output_checksum_on_small_input
apache_beam.examples.complete.game.user_score_it_test.UserScoreIT ‑ test_userscore_output_checksum_on_small_input
apache_beam.examples.complete.juliaset.juliaset.juliaset_test.JuliaSetTest ‑ test_generate_fractal_image
apache_beam.examples.complete.juliaset.juliaset.juliaset_test.JuliaSetTest ‑ test_output_file_format
apache_beam.examples.complete.top_wikipedia_sessions_it_test.ComputeTopSessionsIT ‑ test_top_wikipedia_sessions_output_files_on_small_input
apache_beam.examples.cookbook.bigquery_tornadoes_it_test.BigqueryTornadoesIT ‑ test_bigquery_tornadoes_it
apache_beam.examples.cookbook.coders_it_test.CodersIT ‑ test_coders_output_files_on_small_input
apache_beam.examples.cookbook.custom_ptransform_it_test.CustomPTransformIT ‑ test_custom_ptransform_output_files_on_small_input
apache_beam.examples.cookbook.filters_test.FiltersTest ‑ test_filters_output_bigquery_matcher
apache_beam.examples.cookbook.group_with_coder_test.GroupWithCoderTest ‑ test_basics_with_type_check
apache_beam.examples.cookbook.group_with_coder_test.GroupWithCoderTest ‑ test_basics_without_type_check
apache_beam.examples.dataframe.flight_delays_it_test.FlightDelaysTest ‑ test_flight_delays
apache_beam.examples.dataframe.wordcount_test.WordCountTest ‑ test_basics
apache_beam.examples.ml_transform.ml_transform_it_test
apache_beam.examples.snippets.transforms.elementwise.mltransform_test
apache_beam.examples.snippets.transforms.elementwise.runinference_test
apache_beam.examples.wordcount_debugging_test.WordCountDebuggingTest ‑ test_basics
apache_beam.examples.wordcount_minimal_test.WordCountMinimalTest ‑ test_basics
apache_beam.examples.wordcount_test.WordCountTest ‑ test_basics
apache_beam.ml.inference.huggingface_inference_it_test
apache_beam.ml.inference.huggingface_inference_test
apache_beam.ml.inference.onnx_inference_test
apache_beam.ml.inference.pytorch_inference_test
apache_beam.ml.inference.tensorflow_inference_test
apache_beam.ml.inference.tensorrt_inference_test
apache_beam.ml.inference.vertex_ai_inference_it_test
apache_beam.ml.inference.xgboost_inference_test
apache_beam.ml.transforms.handlers_test
apache_beam.ml.transforms.tft_test
apache_beam.runners.dask.dask_runner_test
apache_beam.testing.analyzers.perf_analysis_test
apache_beam.testing.benchmarks.cloudml.cloudml_benchmark_test
apache_beam.transforms.enrichment_handlers.feast_feature_store_it_test
apache_beam.transforms.enrichment_handlers.feast_feature_store_test
apache_beam.typehints.pytorch_type_compatibility_test
apache_beam.yaml.yaml_ml_test